problem_id (stringlengths, 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths, 13-58) | prompt (stringlengths, 1.1k-25.4k) | golden_diff (stringlengths, 145-5.13k) | verification_info (stringlengths, 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_2863 | rasdani/github-patches | git_diff | Kinto__kinto-1786 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove colander deprecations
```
/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.6/site-packages/cornice/validators/_colander.py:110: DeprecationWarning: Setting schema to a class is deprecated. Set schema to an instance instead.
schema = _ensure_instantiated(schema)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/views/batch.py`
Content:
```
1 import logging
2
3 import colander
4 from cornice.validators import colander_validator
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7
8 from kinto.core import errors
9 from kinto.core import Service
10 from kinto.core.errors import ErrorSchema
11 from kinto.core.utils import merge_dicts, build_request, build_response
12 from kinto.core.resource.viewset import CONTENT_TYPES
13
14
15 subrequest_logger = logging.getLogger('subrequest.summary')
16
17 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',
18 'POST', 'PUT', 'PATCH'))
19
20
21 def string_values(node, cstruct):
22 """Validate that a ``colander.Mapping`` only has strings in its values.
23
24 .. warning::
25
26 Should be associated to a ``colander.Mapping`` schema node.
27 """
28 are_strings = [isinstance(v, str) for v in cstruct.values()]
29 if not all(are_strings):
30 error_msg = '{} contains non string value'.format(cstruct)
31 raise colander.Invalid(node, error_msg)
32
33
34 class BatchRequestSchema(colander.MappingSchema):
35 method = colander.SchemaNode(colander.String(),
36 validator=valid_http_method,
37 missing=colander.drop)
38 path = colander.SchemaNode(colander.String(),
39 validator=colander.Regex('^/'))
40 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
41 validator=string_values,
42 missing=colander.drop)
43 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
44 missing=colander.drop)
45
46 @staticmethod
47 def schema_type():
48 return colander.Mapping(unknown='raise')
49
50
51 class BatchPayloadSchema(colander.MappingSchema):
52 defaults = BatchRequestSchema(missing=colander.drop).clone()
53 requests = colander.SchemaNode(colander.Sequence(),
54 BatchRequestSchema())
55
56 @staticmethod
57 def schema_type():
58 return colander.Mapping(unknown='raise')
59
60 def __init__(self, *args, **kwargs):
61 super().__init__(*args, **kwargs)
62 # On defaults, path is not mandatory.
63 self.get('defaults').get('path').missing = colander.drop
64
65 def deserialize(self, cstruct=colander.null):
66 """Preprocess received data to carefully merge defaults.
67 """
68 if cstruct is not colander.null:
69 defaults = cstruct.get('defaults')
70 requests = cstruct.get('requests')
71 if isinstance(defaults, dict) and isinstance(requests, list):
72 for request in requests:
73 if isinstance(request, dict):
74 merge_dicts(request, defaults)
75 return super().deserialize(cstruct)
76
77
78 class BatchRequest(colander.MappingSchema):
79 body = BatchPayloadSchema()
80
81
82 class BatchResponseSchema(colander.MappingSchema):
83 status = colander.SchemaNode(colander.Integer())
84 path = colander.SchemaNode(colander.String())
85 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
86 validator=string_values,
87 missing=colander.drop)
88 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
89 missing=colander.drop)
90
91
92 class BatchResponseBodySchema(colander.MappingSchema):
93 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))
94
95
96 class BatchResponse(colander.MappingSchema):
97 body = BatchResponseBodySchema()
98
99
100 class ErrorResponseSchema(colander.MappingSchema):
101 body = ErrorSchema()
102
103
104 batch_responses = {
105 '200': BatchResponse(description='Return a list of operation responses.'),
106 '400': ErrorResponseSchema(description='The request was badly formatted.'),
107 'default': ErrorResponseSchema(description='an unknown error occurred.')
108 }
109
110 batch = Service(name='batch', path='/batch',
111 description='Batch operations')
112
113
114 @batch.post(schema=BatchRequest,
115 validators=(colander_validator,),
116 content_type=CONTENT_TYPES,
117 permission=NO_PERMISSION_REQUIRED,
118 tags=['Batch'], operation_id='batch',
119 response_schemas=batch_responses)
120 def post_batch(request):
121 requests = request.validated['body']['requests']
122
123 request.log_context(batch_size=len(requests))
124
125 limit = request.registry.settings['batch_max_requests']
126 if limit and len(requests) > int(limit):
127 error_msg = 'Number of requests is limited to {}'.format(limit)
128 request.errors.add('body', 'requests', error_msg)
129 return
130
131 if any([batch.path in req['path'] for req in requests]):
132 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)
133 request.errors.add('body', 'requests', error_msg)
134 return
135
136 responses = []
137
138 for subrequest_spec in requests:
139 subrequest = build_request(request, subrequest_spec)
140
141 log_context = {**request.log_context(),
142 'path': subrequest.path,
143 'method': subrequest.method}
144 try:
145 # Invoke subrequest without individual transaction.
146 resp, subrequest = request.follow_subrequest(subrequest,
147 use_tweens=False)
148 except httpexceptions.HTTPException as e:
149 # Since some request in the batch failed, we need to stop the parent request
150 # through Pyramid's transaction manager. 5XX errors are already caught by
151 # pyramid_tm's commit_veto
152 # https://github.com/Kinto/kinto/issues/624
153 if e.status_code == 409:
154 request.tm.abort()
155
156 if e.content_type == 'application/json':
157 resp = e
158 else:
159 # JSONify raw Pyramid errors.
160 resp = errors.http_error(e)
161
162 subrequest_logger.info('subrequest.summary', extra=log_context)
163
164 dict_resp = build_response(resp, subrequest)
165 responses.append(dict_resp)
166
167 return {
168 'responses': responses
169 }
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -111,7 +111,7 @@
description='Batch operations')
[email protected](schema=BatchRequest,
[email protected](schema=BatchRequest(),
validators=(colander_validator,),
content_type=CONTENT_TYPES,
permission=NO_PERMISSION_REQUIRED,
| {"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -111,7 +111,7 @@\n description='Batch operations')\n \n \[email protected](schema=BatchRequest,\[email protected](schema=BatchRequest(),\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n", "issue": "Remove colander deprecations\n```\r\n/home/mathieu/Code/Mozilla/kinto/.venv/lib/python3.6/site-packages/cornice/validators/_colander.py:110: DeprecationWarning: Setting schema to a class is deprecated. Set schema to an instance instead.\r\n schema = _ensure_instantiated(schema)\r\n```\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n 
missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}], "after_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\nfrom kinto.core.resource.viewset import CONTENT_TYPES\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. 
warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest(),\n validators=(colander_validator,),\n content_type=CONTENT_TYPES,\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', 
error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]} | 1,961 | 100 |
gh_patches_debug_85 | rasdani/github-patches | git_diff | pwndbg__pwndbg-616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bp breaks on pie binaries before run
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
In the windbg commands while setting a breakpoint using `bp` the address is converted to int
https://github.com/pwndbg/pwndbg/blob/ca17c6dbb0d1bc40ef060331aa48dad0675c5df9/pwndbg/commands/windbg.py#L297
using `pwndbg.inthook.xint`. This verifies if the argument is actually an address but in case of pie binaries it could be possible that the address is loaded later and then the argument is casted to `uint32_t`
### Steps to reproduce
```sh
[/tmp] tail hello.c
#include <stdio.h>
int main(int argc, char **argv) {
puts("Hello World");
return 0;
}
[/tmp] make hello 17:41:43
cc hello.c -o hello
[/tmp] gdb -q hello 17:41:47
pwndbg: loaded 177 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
Reading symbols from hello...(no debugging symbols found)...done.
pwndbg> bp 0x00005555555546b0
Breakpoint 1 at 0x555546b0
pwndbg> bl
Num Type Disp Enb Address What
1 breakpoint keep y 0x00000000555546b0
pwndbg> r
Starting program: /tmp/hello
Warning:
Cannot insert breakpoint 1.
Cannot access memory at address 0x555546b0
```
### My setup
```sh
[pwndbg] git --no-pager log -1 --stat 18:07:21 ☁ dev ☀
commit ca17c6dbb0d1bc40ef060331aa48dad0675c5df9
Author: Alisson Bezerra <[email protected]>
Date: Tue Apr 9 05:54:00 2019 -0300
Add xuntil command (#604)
pwndbg/commands/peda.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
[pwndbg] lsb_release -a 18:08:01 ☁ dev ☀
No LSB modules are available.
Distributor ID: Debian
Description: Debian GNU/Linux 9.8 (stretch)
Release: 9.8
Codename: stretch
[pwndbg] gdb -q 18:10:56 ☁ dev ☀
pwndbg: loaded 178 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
pwndbg> show version
GNU gdb (Debian 7.12-6) 7.12.0.20161007-git
Copyright (C) 2016 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word".
pwndbg> py import sys; print(sys.version)
3.5.3 (default, Sep 27 2018, 17:25:39)
[GCC 6.3.0 20170516]
pwndbg>
```
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/typeinfo.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Common types, and routines for manually loading types from file
5 via GCC.
6 """
7 from __future__ import absolute_import
8 from __future__ import division
9 from __future__ import print_function
10 from __future__ import unicode_literals
11
12 import glob
13 import os
14 import subprocess
15 import sys
16 import tempfile
17
18 import gdb
19
20 import pwndbg.events
21 import pwndbg.gcc
22 import pwndbg.memoize
23
24 module = sys.modules[__name__]
25
26
27 def is_pointer(value):
28 type = value
29
30 if isinstance(value, gdb.Value):
31 type = value.type
32
33 type = type.strip_typedefs()
34 return type.code == gdb.TYPE_CODE_PTR
35
36
37 def lookup_types(*types):
38 for type_str in types:
39 try:
40 return gdb.lookup_type(type_str)
41 except Exception as e:
42 exc = e
43 raise exc
44
45
46 @pwndbg.events.start
47 @pwndbg.events.stop
48 def update():
49
50 module.char = gdb.lookup_type('char')
51 module.ulong = lookup_types('unsigned long', 'uint', 'u32')
52 module.long = lookup_types('long', 'int', 'i32')
53 module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')
54 module.ushort = lookup_types('unsigned short', 'ushort', 'u16')
55 module.uint = lookup_types('unsigned int', 'uint', 'u32')
56 module.void = lookup_types('void', '()')
57 module.uint8 = module.uchar
58 module.uint16 = module.ushort
59 module.uint32 = module.uint
60 module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')
61
62 module.int8 = lookup_types('char', 'i8')
63 module.int16 = lookup_types('short', 'i16')
64 module.int32 = lookup_types('int', 'i32')
65 module.int64 = lookup_types('long long', 'long', 'i64')
66
67 module.ssize_t = module.long
68 module.size_t = module.ulong
69
70 module.pvoid = void.pointer()
71 module.ppvoid = pvoid.pointer()
72 module.pchar = char.pointer()
73
74 module.ptrsize = pvoid.sizeof
75
76 if pvoid.sizeof == 4: module.ptrdiff = uint32
77 if pvoid.sizeof == 8: module.ptrdiff = uint64
78
79 module.null = gdb.Value(0).cast(void)
80
81 # Call it once so we load all of the types
82 update()
83
84 tempdir = tempfile.gettempdir() + '/pwndbg'
85 if not os.path.exists(tempdir):
86 os.mkdir(tempdir)
87
88 # Trial and error until things work
89 blacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',
90 'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',
91 'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',
92 'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',
93 'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',
94 'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',
95 'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',
96 'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',
97 'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',
98 'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']
99
100 def load(name):
101 """Load symbol by name from headers in standard system include directory"""
102 try:
103 return gdb.lookup_type(name)
104 except gdb.error:
105 pass
106
107 # s, _ = gdb.lookup_symbol(name)
108
109 # Try to find an architecture-specific include path
110 arch = pwndbg.arch.current.split(':')[0]
111
112 include_dir = glob.glob('/usr/%s*/include' % arch)
113
114 if include_dir:
115 include_dir = include_dir[0]
116 else:
117 include_dir = '/usr/include'
118
119 source = '#include <fstream>\n'
120
121 for subdir in ['', 'sys', 'netinet']:
122 dirname = os.path.join(include_dir, subdir)
123 for path in glob.glob(os.path.join(dirname, '*.h')):
124 if any(b in path for b in blacklist):
125 continue
126 print(path)
127 source += '#include "%s"\n' % path
128
129
130 source += '''
131 {name} foo;
132 '''.format(**locals())
133
134 filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))
135
136 with open(filename, 'w+') as f:
137 f.write(source)
138 f.flush()
139 os.fsync(f.fileno())
140
141 compile(filename)
142
143 return gdb.lookup_type(name)
144
145 def compile(filename=None, address=0):
146 """Compile and extract symbols from specified file"""
147 if filename is None:
148 print("Specify a filename to compile.")
149 return
150
151 objectname = os.path.splitext(filename)[0] + ".o"
152
153 if not os.path.exists(objectname):
154 gcc = pwndbg.gcc.which()
155 gcc += ['-w', '-c', '-g', filename, '-o', objectname]
156 try:
157 subprocess.check_output(gcc)
158 except subprocess.CalledProcessError as e:
159 return
160
161 add_symbol_file(objectname, address)
162
163 def add_symbol_file(filename=None, address=0):
164 """Read additional symbol table information from the object file filename"""
165 if filename is None:
166 print("Specify a symbol file to add.")
167 return
168
169 with pwndbg.events.Pause():
170 gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)
171
172 def read_gdbvalue(type_name, addr):
173 """ Read the memory contents at addr and interpret them as a GDB value with the given type """
174 gdb_type = pwndbg.typeinfo.load(type_name)
175 return gdb.Value(addr).cast(gdb_type.pointer()).dereference()
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py
--- a/pwndbg/typeinfo.py
+++ b/pwndbg/typeinfo.py
@@ -43,6 +43,7 @@
raise exc
[email protected]_objfile
@pwndbg.events.start
@pwndbg.events.stop
def update():
| {"golden_diff": "diff --git a/pwndbg/typeinfo.py b/pwndbg/typeinfo.py\n--- a/pwndbg/typeinfo.py\n+++ b/pwndbg/typeinfo.py\n@@ -43,6 +43,7 @@\n raise exc\n \n \[email protected]_objfile\n @pwndbg.events.start\n @pwndbg.events.stop\n def update():\n", "issue": "bp breaks on pie binaries before run\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\nIn the windbg commands while setting a breakpoint using `bp` the address is converted to int\r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/ca17c6dbb0d1bc40ef060331aa48dad0675c5df9/pwndbg/commands/windbg.py#L297\r\n\r\nusing `pwndbg.inthook.xint`. This verifies if the argument is actually an address but in case of pie binaries it could be possible that the address is loaded later and then the argument is casted to `uint32_t`\r\n### Steps to reproduce\r\n```sh\r\n[/tmp] tail hello.c \r\n#include <stdio.h>\r\n\r\nint main(int argc, char **argv) {\r\n puts(\"Hello World\");\r\n return 0;\r\n}\r\n[/tmp] make hello 17:41:43 \r\ncc hello.c -o hello\r\n[/tmp] gdb -q hello 17:41:47 \r\npwndbg: loaded 177 commands. Type pwndbg [filter] for a list.\r\npwndbg: created $rebase, $ida gdb functions (can be used with print/break)\r\nReading symbols from hello...(no debugging symbols found)...done.\r\npwndbg> bp 0x00005555555546b0\r\nBreakpoint 1 at 0x555546b0\r\npwndbg> bl\r\nNum Type Disp Enb Address What\r\n1 breakpoint keep y 0x00000000555546b0 \r\npwndbg> r\r\nStarting program: /tmp/hello \r\nWarning:\r\nCannot insert breakpoint 1.\r\nCannot access memory at address 0x555546b0\r\n\r\n```\r\n\r\n### My setup\r\n```sh\r\n[pwndbg] git --no-pager log -1 --stat 18:07:21 \u2601 dev \u2600\r\ncommit ca17c6dbb0d1bc40ef060331aa48dad0675c5df9\r\nAuthor: Alisson Bezerra <[email protected]>\r\nDate: Tue Apr 9 05:54:00 2019 -0300\r\n\r\n Add xuntil command (#604)\r\n\r\n pwndbg/commands/peda.py | 23 +++++++++++++++++++++++\r\n 1 file changed, 23 insertions(+)\r\n[pwndbg] lsb_release -a 18:08:01 \u2601 dev \u2600\r\nNo LSB modules are available.\r\nDistributor ID:\tDebian\r\nDescription:\tDebian GNU/Linux 9.8 (stretch)\r\nRelease:\t9.8\r\nCodename:\tstretch\r\n[pwndbg] gdb -q 18:10:56 \u2601 dev \u2600\r\npwndbg: loaded 178 commands. Type pwndbg [filter] for a list.\r\npwndbg: created $rebase, $ida gdb functions (can be used with print/break)\r\npwndbg> show version\r\nGNU gdb (Debian 7.12-6) 7.12.0.20161007-git\r\nCopyright (C) 2016 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law. 
Type \"show copying\"\r\nand \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<http://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n<http://www.gnu.org/software/gdb/documentation/>.\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\npwndbg> py import sys; print(sys.version)\r\n3.5.3 (default, Sep 27 2018, 17:25:39) \r\n[GCC 6.3.0 20170516]\r\npwndbg> \r\n\r\n```\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise exc\n\n\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32')\n module.long = lookup_types('long', 'int', 'i32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')\n\n module.int8 = lookup_types('char', 'i8')\n module.int16 = lookup_types('short', 'i16')\n module.int32 = lookup_types('int', 'i32')\n module.int64 = lookup_types('long long', 'long', 'i64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 
'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCommon types, and routines for manually loading types from file\nvia GCC.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.gcc\nimport pwndbg.memoize\n\nmodule = sys.modules[__name__]\n\n\ndef is_pointer(value):\n type = value\n\n if isinstance(value, gdb.Value):\n type = value.type\n\n type = type.strip_typedefs()\n return type.code == gdb.TYPE_CODE_PTR\n\n\ndef lookup_types(*types):\n for type_str in types:\n try:\n return gdb.lookup_type(type_str)\n except Exception as e:\n exc = e\n raise 
exc\n\n\[email protected]_objfile\[email protected]\[email protected]\ndef update():\n\n module.char = gdb.lookup_type('char')\n module.ulong = lookup_types('unsigned long', 'uint', 'u32')\n module.long = lookup_types('long', 'int', 'i32')\n module.uchar = lookup_types('unsigned char', 'ubyte', 'u8')\n module.ushort = lookup_types('unsigned short', 'ushort', 'u16')\n module.uint = lookup_types('unsigned int', 'uint', 'u32')\n module.void = lookup_types('void', '()')\n module.uint8 = module.uchar\n module.uint16 = module.ushort\n module.uint32 = module.uint\n module.uint64 = lookup_types('unsigned long long', 'ulong', 'u64')\n\n module.int8 = lookup_types('char', 'i8')\n module.int16 = lookup_types('short', 'i16')\n module.int32 = lookup_types('int', 'i32')\n module.int64 = lookup_types('long long', 'long', 'i64')\n\n module.ssize_t = module.long\n module.size_t = module.ulong\n\n module.pvoid = void.pointer()\n module.ppvoid = pvoid.pointer()\n module.pchar = char.pointer()\n\n module.ptrsize = pvoid.sizeof\n\n if pvoid.sizeof == 4: module.ptrdiff = uint32\n if pvoid.sizeof == 8: module.ptrdiff = uint64\n\n module.null = gdb.Value(0).cast(void)\n\n# Call it once so we load all of the types\nupdate()\n\ntempdir = tempfile.gettempdir() + '/pwndbg'\nif not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n# Trial and error until things work\nblacklist = ['regexp.h', 'xf86drm.h', 'libxl_json.h', 'xf86drmMode.h',\n'caca0.h', 'xenguest.h', '_libxl_types_json.h', 'term_entry.h', 'slcurses.h',\n'pcreposix.h', 'sudo_plugin.h', 'tic.h', 'sys/elf.h', 'sys/vm86.h',\n'xenctrlosdep.h', 'xenctrl.h', 'cursesf.h', 'cursesm.h', 'gdbm.h', 'dbm.h',\n'gcrypt-module.h', 'term.h', 'gmpxx.h', 'pcap/namedb.h', 'pcap-namedb.h',\n'evr.h', 'mpc.h', 'fdt.h', 'mpfr.h', 'evrpc.h', 'png.h', 'zlib.h', 'pngconf.h',\n'libelfsh.h', 'libmjollnir.h', 'hwloc.h', 'ares.h', 'revm.h', 'ares_rules.h',\n'libunwind-ptrace.h', 'libui.h', 'librevm-color.h', 'libedfmt.h','revm-objects.h',\n'libetrace.h', 'revm-io.h','libasm-mips.h','libstderesi.h','libasm.h','libaspect.h',\n'libunwind.h','libmjollnir-objects.h','libunwind-coredump.h','libunwind-dynamic.h']\n\ndef load(name):\n \"\"\"Load symbol by name from headers in standard system include directory\"\"\"\n try:\n return gdb.lookup_type(name)\n except gdb.error:\n pass\n\n # s, _ = gdb.lookup_symbol(name)\n\n # Try to find an architecture-specific include path\n arch = pwndbg.arch.current.split(':')[0]\n\n include_dir = glob.glob('/usr/%s*/include' % arch)\n\n if include_dir:\n include_dir = include_dir[0]\n else:\n include_dir = '/usr/include'\n\n source = '#include <fstream>\\n'\n\n for subdir in ['', 'sys', 'netinet']:\n dirname = os.path.join(include_dir, subdir)\n for path in glob.glob(os.path.join(dirname, '*.h')):\n if any(b in path for b in blacklist):\n continue\n print(path)\n source += '#include \"%s\"\\n' % path\n\n\n source += '''\n{name} foo;\n'''.format(**locals())\n\n filename = '%s/%s_%s.cc' % (tempdir, arch, '-'.join(name.split()))\n\n with open(filename, 'w+') as f:\n f.write(source)\n f.flush()\n os.fsync(f.fileno())\n\n compile(filename)\n\n return gdb.lookup_type(name)\n\ndef compile(filename=None, address=0):\n \"\"\"Compile and extract symbols from specified file\"\"\"\n if filename is None:\n print(\"Specify a filename to compile.\")\n return\n\n objectname = os.path.splitext(filename)[0] + \".o\"\n\n if not os.path.exists(objectname):\n gcc = pwndbg.gcc.which()\n gcc += ['-w', '-c', '-g', filename, '-o', objectname]\n try:\n subprocess.check_output(gcc)\n 
except subprocess.CalledProcessError as e:\n return\n\n add_symbol_file(objectname, address)\n\ndef add_symbol_file(filename=None, address=0):\n \"\"\"Read additional symbol table information from the object file filename\"\"\"\n if filename is None:\n print(\"Specify a symbol file to add.\")\n return\n\n with pwndbg.events.Pause():\n gdb.execute('add-symbol-file %s %s' % (filename, address), from_tty=False, to_string=True)\n\ndef read_gdbvalue(type_name, addr):\n \"\"\" Read the memory contents at addr and interpret them as a GDB value with the given type \"\"\"\n gdb_type = pwndbg.typeinfo.load(type_name)\n return gdb.Value(addr).cast(gdb_type.pointer()).dereference()\n", "path": "pwndbg/typeinfo.py"}]} | 3,365 | 80 |
gh_patches_debug_7338 | rasdani/github-patches | git_diff | mne-tools__mne-bids-1069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`key_press_event` is deprecated, needs to be replaced in our code
see: https://github.com/mne-tools/mne-bids/actions/runs/3145484445/jobs/5112832235#step:14:1665
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 """Configure details for documentation with sphinx."""
2 import os
3 import sys
4 from datetime import date
5
6 import sphinx_gallery # noqa: F401
7 from sphinx_gallery.sorting import ExampleTitleSortKey
8
9 import mne_bids
10
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 curdir = os.path.dirname(__file__)
16 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))
17 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
18
19
20 # -- General configuration ------------------------------------------------
21
22 # If your documentation needs a minimal Sphinx version, state it here.
23 #
24 # needs_sphinx = '1.0'
25
26 # Add any Sphinx extension module names here, as strings. They can be
27 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
28 # ones.
29 extensions = [
30 'sphinx.ext.githubpages',
31 'sphinx.ext.autodoc',
32 'sphinx.ext.mathjax',
33 'sphinx.ext.viewcode',
34 'sphinx.ext.autosummary',
35 'sphinx.ext.doctest',
36 'sphinx.ext.intersphinx',
37 'sphinx_gallery.gen_gallery',
38 'numpydoc',
39 'sphinx_copybutton',
40 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py
41 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py
42 ]
43
44 # configure sphinx-copybutton
45 copybutton_prompt_text = r">>> |\.\.\. |\$ "
46 copybutton_prompt_is_regexp = True
47
48 # configure numpydoc
49 numpydoc_xref_param_type = True
50 numpydoc_class_members_toctree = False
51 numpydoc_attributes_as_param_list = True
52 numpydoc_xref_aliases = {
53 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
54 'path-like': ':term:`path-like <mne:path-like>`',
55 'array-like': ':term:`array-like <mne:array-like>`',
56 'int': ':class:`int <python:int>`',
57 'bool': ':class:`bool <python:bool>`',
58 'float': ':class:`float <python:float>`',
59 'list': ':class:`list <python:list>`',
60 'tuple': ':class:`tuple <python:tuple>`',
61 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',
62 }
63 numpydoc_xref_ignore = {
64 # words
65 'instance', 'instances', 'of'
66 }
67
68
69 # generate autosummary even if no references
70 autosummary_generate = True
71 autodoc_default_options = {'inherited-members': None}
72 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix
73
74 # configure linkcheck
75 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder
76 linkcheck_retries = 2
77 linkcheck_rate_limit_timeout = 15.0
78 linkcheck_ignore = [
79 r'https://www.researchgate.net/profile/.*',
80 ]
81
82 # The suffix(es) of source filenames.
83 # You can specify multiple suffix as a list of string:
84 #
85 # source_suffix = ['.rst', '.md']
86 source_suffix = '.rst'
87
88 # The master toctree document.
89 master_doc = 'index'
90
91 # General information about the project.
92 project = u'MNE-BIDS'
93 td = date.today()
94 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
95 td.isoformat())
96
97 author = u'MNE Developers'
98
99 # The version info for the project you're documenting, acts as replacement for
100 # |version| and |release|, also used in various other places throughout the
101 # built documents.
102 #
103 # The short X.Y version.
104 version = mne_bids.__version__
105 # The full version, including alpha/beta/rc tags.
106 release = version
107
108 # List of patterns, relative to source directory, that match files and
109 # directories to ignore when looking for source files.
110 # This patterns also effect to html_static_path and html_extra_path
111 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',
112 '.DS_Store']
113
114 # HTML options (e.g., theme)
115 html_show_sourcelink = False
116 html_copy_source = False
117
118 html_theme = 'pydata_sphinx_theme'
119
120 # Add any paths that contain templates here, relative to this directory.
121 templates_path = ['_templates']
122 html_static_path = ['_static']
123 html_css_files = ['style.css']
124
125 # Theme options are theme-specific and customize the look and feel of a theme
126 # further. For a list of options available for each theme, see the
127 # documentation.
128 html_theme_options = {
129 'icon_links': [
130 dict(name='GitHub',
131 url='https://github.com/mne-tools/mne-bids',
132 icon='fab fa-github-square'),
133 dict(name='Discourse',
134 url='https://mne.discourse.group/tags/mne-bids',
135 icon='fab fa-discourse'),
136 ],
137 'icon_links_label': 'Quick Links', # for screen reader
138 'use_edit_page_button': False,
139 'navigation_with_keys': False,
140 'show_toc_level': 1,
141 'navbar_end': ['version-switcher', 'navbar-icon-links'],
142 }
143
144 html_context = {
145 'versions_dropdown': {
146 'dev': 'v0.11 (devel)',
147 'stable': 'v0.10 (stable)',
148 'v0.9': 'v0.9',
149 'v0.8': 'v0.8',
150 'v0.7': 'v0.7',
151 'v0.6': 'v0.6',
152 'v0.5': 'v0.5',
153 'v0.4': 'v0.4',
154 'v0.3': 'v0.3',
155 'v0.2': 'v0.2',
156 'v0.1': 'v0.1',
157 },
158 }
159
160 html_sidebars = {}
161
162 # Example configuration for intersphinx: refer to the Python standard library.
163 intersphinx_mapping = {
164 'python': ('https://docs.python.org/3', None),
165 'mne': ('https://mne.tools/dev', None),
166 'numpy': ('https://numpy.org/devdocs', None),
167 'scipy': ('https://scipy.github.io/devdocs', None),
168 'matplotlib': ('https://matplotlib.org', None),
169 'nilearn': ('http://nilearn.github.io/stable', None),
170 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),
171 'nibabel': ('https://nipy.org/nibabel', None),
172 }
173 intersphinx_timeout = 5
174
175 # Resolve binder filepath_prefix. From the docs:
176 # "A prefix to append to the filepath in the Binder links. You should use this
177 # if you will store your built documentation in a sub-folder of a repository,
178 # instead of in the root."
179 # we will store dev docs in a `dev` subdirectory and all other docs in a
180 # directory "v" + version_str. E.g., "v0.3"
181 if 'dev' in version:
182 filepath_prefix = 'dev'
183 else:
184 filepath_prefix = 'v{}'.format(version)
185
186 sphinx_gallery_conf = {
187 'doc_module': 'mne_bids',
188 'reference_url': {
189 'mne_bids': None,
190 },
191 'backreferences_dir': 'generated',
192 'examples_dirs': '../examples',
193 'within_subsection_order': ExampleTitleSortKey,
194 'gallery_dirs': 'auto_examples',
195 'filename_pattern': '^((?!sgskip).)*$',
196 'binder': {
197 # Required keys
198 'org': 'mne-tools',
199 'repo': 'mne-bids',
200 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
201 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
202 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.
203 'dependencies': [
204 '../test_requirements.txt',
205 './requirements.txt',
206 ],
207 }
208 }
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -52,7 +52,7 @@
numpydoc_xref_aliases = {
'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
'path-like': ':term:`path-like <mne:path-like>`',
- 'array-like': ':term:`array-like <mne:array-like>`',
+ 'array-like': ':term:`array_like <numpy:array_like>`',
'int': ':class:`int <python:int>`',
'bool': ':class:`bool <python:bool>`',
'float': ':class:`float <python:float>`',
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -52,7 +52,7 @@\n numpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n- 'array-like': ':term:`array-like <mne:array-like>`',\n+ 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n", "issue": "`key_press_event` is deprecated, needs to be replaced in our code\nsee: https://github.com/mne-tools/mne-bids/actions/runs/3145484445/jobs/5112832235#step:14:1665\r\n\r\n\n", "before_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. 
|\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array-like <mne:array-like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.11 (devel)',\n 'stable': 'v0.10 (stable)',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}], "after_files": [{"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. |\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. 
Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.11 (devel)',\n 'stable': 'v0.10 (stable)',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. 
https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]} | 2,730 | 160 |
gh_patches_debug_41848 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[android-ndk] Fix broken symlinks
Specify library name and version: **android-ndk/r23**
The NDK r23 zip file contains the symlinks now, but tools.get() can't extract the links properly.
https://github.com/conan-io/conan/issues/9469
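For context, a minimal sketch of why a plain `zipfile`-based extraction loses the links and how they can be restored afterwards — the helper name below is made up for illustration, but the `external_attr` check is the same idea the recipe patch relies on:

```python
import os
import stat
import zipfile

def extract_preserving_symlinks(zip_path, dest):
    # zipfile extracts a symlink entry as a plain file whose body is the
    # link target, so the links have to be recreated in a second pass
    with zipfile.ZipFile(zip_path) as zf:
        for info in zf.infolist():
            extracted = zf.extract(info, dest)
            mode = info.external_attr >> 16  # upper 16 bits hold the Unix st_mode
            if stat.S_ISLNK(mode):
                with open(extracted) as f:
                    target = f.read()
                os.unlink(extracted)
                os.symlink(target, extracted)
```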
---
- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.
- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.
- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.
- [x] I've tried at least one configuration locally with the
[conan-center hook](https://github.com/conan-io/hooks.git) activated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/android-ndk/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5 required_conan_version = ">=1.33.0"
6
7
8 class AndroidNDKConan(ConanFile):
9 name = "android-ndk"
10 description = "The Android NDK is a toolset that lets you implement parts of your app in " \
11 "native code, using languages such as C and C++"
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "https://developer.android.com/ndk/"
14 topics = ("android", "NDK", "toolchain", "compiler")
15 license = "Apache-2.0"
16
17 settings = "os", "arch"
18
19 short_paths = True
20 exports_sources = "cmake-wrapper.cmd", "cmake-wrapper"
21
22 @property
23 def _source_subfolder(self):
24 return "source_subfolder"
25
26 def _settings_os_supported(self):
27 return self.conan_data["sources"][self.version]["url"].get(str(self.settings.os)) is not None
28
29 def _settings_arch_supported(self):
30 return self.conan_data["sources"][self.version]["url"].get(str(self.settings.os), {}).get(str(self.settings.arch)) is not None
31
32 def validate(self):
33 if not self._settings_os_supported():
34 raise ConanInvalidConfiguration(f"os={self.settings.os} is not supported by {self.name} (no binaries are available)")
35 if not self._settings_arch_supported():
36 raise ConanInvalidConfiguration(f"os,arch={self.settings.os},{self.settings.arch} is not supported by {self.name} (no binaries are available)")
37
38 def build(self):
39 tools.get(**self.conan_data["sources"][self.version]["url"][str(self.settings.os)][str(self.settings.arch)],
40 destination=self._source_subfolder, strip_root=True)
41
42 def package(self):
43 self.copy("*", src=self._source_subfolder, dst=".", keep_path=True, symlinks=True)
44 self.copy("*NOTICE", src=self._source_subfolder, dst="licenses")
45 self.copy("*NOTICE.toolchain", src=self._source_subfolder, dst="licenses")
46 self.copy("cmake-wrapper.cmd")
47 self.copy("cmake-wrapper")
48 self._fix_permissions()
49
50 # from here on, everything is assumed to run in 2 profile mode, using this android-ndk recipe as a build requirement
51
52 @property
53 def _platform(self):
54 return {
55 "Linux": "linux",
56 "Macos": "darwin",
57 "Windows": "windows",
58 }.get(str(self.settings_build.os))
59
60 @property
61 def _android_abi(self):
62 return {
63 "armv7": "armeabi-v7a",
64 "armv8": "arm64-v8a",
65 "x86": "x86",
66 "x86_64": "x86_64",
67 }.get(str(self.settings_target.arch))
68
69 @property
70 def _llvm_triplet(self):
71 arch = {
72 "armv7": "arm",
73 "armv8": "aarch64",
74 "x86": "i686",
75 "x86_64": "x86_64",
76 }.get(str(self.settings_target.arch))
77 abi = "androideabi" if self.settings_target.arch == "armv7" else "android"
78 return f"{arch}-linux-{abi}"
79
80 @property
81 def _clang_triplet(self):
82 arch = {
83 "armv7": "armv7a",
84 "armv8": "aarch64",
85 "x86": "i686",
86 "x86_64": "x86_64",
87 }.get(str(self.settings_target.arch))
88 abi = "androideabi" if self.settings_target.arch == "armv7" else "android"
89 return f"{arch}-linux-{abi}"
90
91 def _fix_permissions(self):
92 if os.name != "posix":
93 return
94 for root, _, files in os.walk(self.package_folder):
95 for filename in files:
96 filename = os.path.join(root, filename)
97 with open(filename, "rb") as f:
98 sig = f.read(4)
99 if type(sig) is str:
100 sig = [ord(s) for s in sig]
101 else:
102 sig = [s for s in sig]
103 if len(sig) > 2 and sig[0] == 0x23 and sig[1] == 0x21:
104 self.output.info(f"chmod on script file: '{filename}'")
105 self._chmod_plus_x(filename)
106 elif sig == [0x7F, 0x45, 0x4C, 0x46]:
107 self.output.info(f"chmod on ELF file: '{filename}'")
108 self._chmod_plus_x(filename)
109 elif sig == [0xCA, 0xFE, 0xBA, 0xBE] or \
110 sig == [0xBE, 0xBA, 0xFE, 0xCA] or \
111 sig == [0xFE, 0xED, 0xFA, 0xCF] or \
112 sig == [0xCF, 0xFA, 0xED, 0xFE] or \
113 sig == [0xFE, 0xEF, 0xFA, 0xCE] or \
114 sig == [0xCE, 0xFA, 0xED, 0xFE]:
115 self.output.info(f"chmod on Mach-O file: '{filename}'")
116 self._chmod_plus_x(filename)
117
118 @property
119 def _host(self):
120 return f"{self._platform}_{self.settings.arch}"
121
122 @property
123 def _ndk_root(self):
124 return os.path.join(self.package_folder, "toolchains", "llvm", "prebuilt", self._host)
125
126 def _tool_name(self, tool):
127 if "clang" in tool:
128 suffix = ".cmd" if self.settings_build.os == "Windows" else ""
129 return f"{self._clang_triplet}{self.settings_target.os.api_level}-{tool}{suffix}"
130 else:
131 suffix = ".exe" if self.settings_build.os == "Windows" else ""
132 return f"{self._llvm_triplet}-{tool}{suffix}"
133
134 @property
135 def _cmake_system_processor(self):
136 cmake_system_processor = {
137 "x86_64": "x86_64",
138 "x86": "i686",
139 "mips": "mips",
140 "mips64": "mips64",
141 }.get(str(self.settings.arch))
142 if self.settings_target.arch == "armv8":
143 cmake_system_processor = "aarch64"
144 elif "armv7" in str(self.settings.arch):
145 cmake_system_processor = "armv7-a"
146 elif "armv6" in str(self.settings.arch):
147 cmake_system_processor = "armv6"
148 elif "armv5" in str(self.settings.arch):
149 cmake_system_processor = "armv5te"
150 return cmake_system_processor
151
152 def _define_tool_var(self, name, value):
153 ndk_bin = os.path.join(self._ndk_root, "bin")
154 path = os.path.join(ndk_bin, self._tool_name(value))
155 self.output.info(f"Creating {name} environment variable: {path}")
156 return path
157
158 @staticmethod
159 def _chmod_plus_x(filename):
160 if os.name == "posix":
161 os.chmod(filename, os.stat(filename).st_mode | 0o111)
162
163 def package_info(self):
164 # test shall pass, so this runs also in the build as build requirement context
165 # ndk-build: https://developer.android.com/ndk/guides/ndk-build
166 self.env_info.PATH.append(self.package_folder)
167
168 # You should use the ANDROID_NDK_ROOT environment variable to indicate where the NDK is located.
169 # That's what most NDK-related scripts use (inside the NDK, and outside of it).
170 # https://groups.google.com/g/android-ndk/c/qZjhOaynHXc
171 self.output.info(f"Creating ANDROID_NDK_ROOT environment variable: {self.package_folder}")
172 self.env_info.ANDROID_NDK_ROOT = self.package_folder
173
174 self.output.info(f"Creating ANDROID_NDK_HOME environment variable: {self.package_folder}")
175 self.env_info.ANDROID_NDK_HOME = self.package_folder
176
177 # this is not enough, I can kill that .....
178 if not hasattr(self, "settings_target"):
179 return
180
181 # interestingly I can reach that with
182 # conan test --profile:build nsdk-default --profile:host default /Users/a4z/elux/conan/myrecipes/android-ndk/all/test_package android-ndk/r21d@
183 if self.settings_target is None:
184 return
185
186 # And if we are not building for Android, why bother at all
187 if not self.settings_target.os == "Android":
188 self.output.warn(f"You've added {self.name}/{self.version} as a build requirement, while os={self.settings_targe.os} != Android")
189 return
190
191 cmake_system_processor = self._cmake_system_processor
192 if cmake_system_processor:
193 self.output.info(f"Creating CONAN_CMAKE_SYSTEM_PROCESSOR environment variable: {cmake_system_processor}")
194 self.env_info.CONAN_CMAKE_SYSTEM_PROCESSOR = cmake_system_processor
195 else:
196 self.output.warn("Could not find a valid CMAKE_SYSTEM_PROCESSOR variable, supported by CMake")
197
198 self.output.info(f"Creating NDK_ROOT environment variable: {self._ndk_root}")
199 self.env_info.NDK_ROOT = self._ndk_root
200
201 self.output.info(f"Creating CHOST environment variable: {self._llvm_triplet}")
202 self.env_info.CHOST = self._llvm_triplet
203
204 ndk_sysroot = os.path.join(self._ndk_root, "sysroot")
205 self.output.info(f"Creating CONAN_CMAKE_FIND_ROOT_PATH environment variable: {ndk_sysroot}")
206 self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = ndk_sysroot
207
208 self.output.info(f"Creating SYSROOT environment variable: {ndk_sysroot}")
209 self.env_info.SYSROOT = ndk_sysroot
210
211 self.output.info(f"Creating self.cpp_info.sysroot: {ndk_sysroot}")
212 self.cpp_info.sysroot = ndk_sysroot
213
214 self.output.info(f"Creating ANDROID_NATIVE_API_LEVEL environment variable: {self.settings_target.os.api_level}")
215 self.env_info.ANDROID_NATIVE_API_LEVEL = str(self.settings_target.os.api_level)
216
217 self._chmod_plus_x(os.path.join(self.package_folder, "cmake-wrapper"))
218 cmake_wrapper = "cmake-wrapper.cmd" if self.settings.os == "Windows" else "cmake-wrapper"
219 cmake_wrapper = os.path.join(self.package_folder, cmake_wrapper)
220 self.output.info(f"Creating CONAN_CMAKE_PROGRAM environment variable: {cmake_wrapper}")
221 self.env_info.CONAN_CMAKE_PROGRAM = cmake_wrapper
222
223 toolchain = os.path.join(self.package_folder, "build", "cmake", "android.toolchain.cmake")
224 self.output.info(f"Creating CONAN_CMAKE_TOOLCHAIN_FILE environment variable: {toolchain}")
225 self.env_info.CONAN_CMAKE_TOOLCHAIN_FILE = toolchain
226
227 self.env_info.CC = self._define_tool_var("CC", "clang")
228 self.env_info.CXX = self._define_tool_var("CXX", "clang++")
229 self.env_info.LD = self._define_tool_var("LD", "ld")
230 self.env_info.AR = self._define_tool_var("AR", "ar")
231 self.env_info.AS = self._define_tool_var("AS", "as")
232 self.env_info.RANLIB = self._define_tool_var("RANLIB", "ranlib")
233 self.env_info.STRIP = self._define_tool_var("STRIP", "strip")
234 self.env_info.ADDR2LINE = self._define_tool_var("ADDR2LINE", "addr2line")
235 self.env_info.NM = self._define_tool_var("NM", "nm")
236 self.env_info.OBJCOPY = self._define_tool_var("OBJCOPY", "objcopy")
237 self.env_info.OBJDUMP = self._define_tool_var("OBJDUMP", "objdump")
238 self.env_info.READELF = self._define_tool_var("READELF", "readelf")
239 self.env_info.ELFEDIT = self._define_tool_var("ELFEDIT", "elfedit")
240
241 self.env_info.ANDROID_PLATFORM = f"android-{self.settings_target.os.api_level}"
242 self.env_info.ANDROID_TOOLCHAIN = "clang"
243 self.env_info.ANDROID_ABI = self._android_abi
244 libcxx_str = str(self.settings_target.compiler.libcxx)
245 self.env_info.ANDROID_STL = libcxx_str if libcxx_str.startswith("c++_") else "c++_shared"
246
247 self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "BOTH"
248 self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = "BOTH"
249 self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = "BOTH"
250 self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = "BOTH"
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/android-ndk/all/conanfile.py b/recipes/android-ndk/all/conanfile.py
--- a/recipes/android-ndk/all/conanfile.py
+++ b/recipes/android-ndk/all/conanfile.py
@@ -1,14 +1,14 @@
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import os
+import shutil
required_conan_version = ">=1.33.0"
class AndroidNDKConan(ConanFile):
name = "android-ndk"
- description = "The Android NDK is a toolset that lets you implement parts of your app in " \
- "native code, using languages such as C and C++"
+ description = "The Android NDK is a toolset that lets you implement parts of your app in native code, using languages such as C and C++"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://developer.android.com/ndk/"
topics = ("android", "NDK", "toolchain", "compiler")
@@ -36,8 +36,12 @@
raise ConanInvalidConfiguration(f"os,arch={self.settings.os},{self.settings.arch} is not supported by {self.name} (no binaries are available)")
def build(self):
- tools.get(**self.conan_data["sources"][self.version]["url"][str(self.settings.os)][str(self.settings.arch)],
- destination=self._source_subfolder, strip_root=True)
+ if self.version in ['r23']:
+ data = self.conan_data["sources"][self.version]["url"][str(self.settings.os)][str(self.settings.arch)]
+ unzip_fix_symlinks(url=data["url"], target_folder=self._source_subfolder, sha256=data["sha256"])
+ else:
+ tools.get(**self.conan_data["sources"][self.version]["url"][str(self.settings.os)][str(self.settings.arch)],
+ destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("*", src=self._source_subfolder, dst=".", keep_path=True, symlinks=True)
@@ -117,7 +121,7 @@
@property
def _host(self):
- return f"{self._platform}_{self.settings.arch}"
+ return f"{self._platform}-{self.settings.arch}"
@property
def _ndk_root(self):
@@ -248,3 +252,40 @@
self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = "BOTH"
self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = "BOTH"
self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = "BOTH"
+
+
+def unzip_fix_symlinks(url, target_folder, sha256):
+ # Python's built-in module 'zipfile' won't handle symlinks (https://bugs.python.org/issue37921)
+ # Most of the logic borrowed from this PR https://github.com/conan-io/conan/pull/8100
+
+ filename = "android_sdk.zip"
+ tools.download(url, filename)
+ tools.check_sha256(filename, sha256)
+ tools.unzip(filename, destination=target_folder, strip_root=True)
+
+ def is_symlink_zipinfo(zi):
+ return (zi.external_attr >> 28) == 0xA
+
+ full_path = os.path.normpath(target_folder)
+ import zipfile
+ with zipfile.ZipFile(filename, "r") as z:
+ zip_info = z.infolist()
+
+ names = [n.replace("\\", "/") for n in z.namelist()]
+ common_folder = os.path.commonprefix(names).split("/", 1)[0]
+
+ for file_ in zip_info:
+ if is_symlink_zipinfo(file_):
+ rel_path = os.path.relpath(file_.filename, common_folder)
+ full_name = os.path.join(full_path, rel_path)
+ target = tools.load(full_name)
+ os.unlink(full_name)
+
+ try:
+ os.symlink(target, full_name)
+ except OSError:
+ if not os.path.isabs(target):
+ target = os.path.normpath(os.path.join(os.path.dirname(full_name), target))
+ shutil.copy2(target, full_name)
+
+ os.unlink(filename)
| {"golden_diff": "diff --git a/recipes/android-ndk/all/conanfile.py b/recipes/android-ndk/all/conanfile.py\n--- a/recipes/android-ndk/all/conanfile.py\n+++ b/recipes/android-ndk/all/conanfile.py\n@@ -1,14 +1,14 @@\n from conans import ConanFile, tools\n from conans.errors import ConanInvalidConfiguration\n import os\n+import shutil\n \n required_conan_version = \">=1.33.0\"\n \n \n class AndroidNDKConan(ConanFile):\n name = \"android-ndk\"\n- description = \"The Android NDK is a toolset that lets you implement parts of your app in \" \\\n- \"native code, using languages such as C and C++\"\n+ description = \"The Android NDK is a toolset that lets you implement parts of your app in native code, using languages such as C and C++\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://developer.android.com/ndk/\"\n topics = (\"android\", \"NDK\", \"toolchain\", \"compiler\")\n@@ -36,8 +36,12 @@\n raise ConanInvalidConfiguration(f\"os,arch={self.settings.os},{self.settings.arch} is not supported by {self.name} (no binaries are available)\")\n \n def build(self):\n- tools.get(**self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)],\n- destination=self._source_subfolder, strip_root=True)\n+ if self.version in ['r23']:\n+ data = self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)]\n+ unzip_fix_symlinks(url=data[\"url\"], target_folder=self._source_subfolder, sha256=data[\"sha256\"])\n+ else:\n+ tools.get(**self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)],\n+ destination=self._source_subfolder, strip_root=True)\n \n def package(self):\n self.copy(\"*\", src=self._source_subfolder, dst=\".\", keep_path=True, symlinks=True)\n@@ -117,7 +121,7 @@\n \n @property\n def _host(self):\n- return f\"{self._platform}_{self.settings.arch}\"\n+ return f\"{self._platform}-{self.settings.arch}\"\n \n @property\n def _ndk_root(self):\n@@ -248,3 +252,40 @@\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = \"BOTH\"\n+\n+\n+def unzip_fix_symlinks(url, target_folder, sha256):\n+ # Python's built-in module 'zipfile' won't handle symlinks (https://bugs.python.org/issue37921)\n+ # Most of the logic borrowed from this PR https://github.com/conan-io/conan/pull/8100\n+\n+ filename = \"android_sdk.zip\"\n+ tools.download(url, filename)\n+ tools.check_sha256(filename, sha256)\n+ tools.unzip(filename, destination=target_folder, strip_root=True)\n+\n+ def is_symlink_zipinfo(zi):\n+ return (zi.external_attr >> 28) == 0xA\n+\n+ full_path = os.path.normpath(target_folder)\n+ import zipfile\n+ with zipfile.ZipFile(filename, \"r\") as z:\n+ zip_info = z.infolist()\n+ \n+ names = [n.replace(\"\\\\\", \"/\") for n in z.namelist()]\n+ common_folder = os.path.commonprefix(names).split(\"/\", 1)[0]\n+\n+ for file_ in zip_info:\n+ if is_symlink_zipinfo(file_):\n+ rel_path = os.path.relpath(file_.filename, common_folder)\n+ full_name = os.path.join(full_path, rel_path)\n+ target = tools.load(full_name)\n+ os.unlink(full_name)\n+\n+ try:\n+ os.symlink(target, full_name)\n+ except OSError:\n+ if not os.path.isabs(target):\n+ target = os.path.normpath(os.path.join(os.path.dirname(full_name), target))\n+ shutil.copy2(target, full_name)\n+\n+ os.unlink(filename)\n", "issue": "[android-ndk] Fix broken symlinks\nSpecify library name and version: 
**android-ndk/r23**\r\n\r\nThe NDK r23 zip file contains the symlinks now, but tools.get() can't extract the links properly.\r\nhttps://github.com/conan-io/conan/issues/9469\r\n \r\n\r\n---\r\n\r\n- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.\r\n- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.\r\n- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.\r\n- [x] I've tried at least one configuration locally with the\r\n [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass AndroidNDKConan(ConanFile):\n name = \"android-ndk\"\n description = \"The Android NDK is a toolset that lets you implement parts of your app in \" \\\n \"native code, using languages such as C and C++\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://developer.android.com/ndk/\"\n topics = (\"android\", \"NDK\", \"toolchain\", \"compiler\")\n license = \"Apache-2.0\"\n\n settings = \"os\", \"arch\"\n\n short_paths = True\n exports_sources = \"cmake-wrapper.cmd\", \"cmake-wrapper\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def _settings_os_supported(self):\n return self.conan_data[\"sources\"][self.version][\"url\"].get(str(self.settings.os)) is not None\n\n def _settings_arch_supported(self):\n return self.conan_data[\"sources\"][self.version][\"url\"].get(str(self.settings.os), {}).get(str(self.settings.arch)) is not None\n\n def validate(self):\n if not self._settings_os_supported():\n raise ConanInvalidConfiguration(f\"os={self.settings.os} is not supported by {self.name} (no binaries are available)\")\n if not self._settings_arch_supported():\n raise ConanInvalidConfiguration(f\"os,arch={self.settings.os},{self.settings.arch} is not supported by {self.name} (no binaries are available)\")\n\n def build(self):\n tools.get(**self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)],\n destination=self._source_subfolder, strip_root=True)\n\n def package(self):\n self.copy(\"*\", src=self._source_subfolder, dst=\".\", keep_path=True, symlinks=True)\n self.copy(\"*NOTICE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"*NOTICE.toolchain\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"cmake-wrapper.cmd\")\n self.copy(\"cmake-wrapper\")\n self._fix_permissions()\n\n # from here on, everything is assumed to run in 2 profile mode, using this android-ndk recipe as a build requirement\n\n @property\n def _platform(self):\n return {\n \"Linux\": \"linux\",\n \"Macos\": \"darwin\",\n \"Windows\": \"windows\",\n }.get(str(self.settings_build.os))\n\n @property\n def _android_abi(self):\n return {\n \"armv7\": \"armeabi-v7a\",\n \"armv8\": \"arm64-v8a\",\n \"x86\": \"x86\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings_target.arch))\n\n @property\n def _llvm_triplet(self):\n arch = {\n \"armv7\": \"arm\",\n \"armv8\": \"aarch64\",\n \"x86\": \"i686\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings_target.arch))\n abi = \"androideabi\" if self.settings_target.arch == \"armv7\" else \"android\"\n return f\"{arch}-linux-{abi}\"\n\n @property\n def _clang_triplet(self):\n arch = 
{\n \"armv7\": \"armv7a\",\n \"armv8\": \"aarch64\",\n \"x86\": \"i686\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings_target.arch))\n abi = \"androideabi\" if self.settings_target.arch == \"armv7\" else \"android\"\n return f\"{arch}-linux-{abi}\"\n\n def _fix_permissions(self):\n if os.name != \"posix\":\n return\n for root, _, files in os.walk(self.package_folder):\n for filename in files:\n filename = os.path.join(root, filename)\n with open(filename, \"rb\") as f:\n sig = f.read(4)\n if type(sig) is str:\n sig = [ord(s) for s in sig]\n else:\n sig = [s for s in sig]\n if len(sig) > 2 and sig[0] == 0x23 and sig[1] == 0x21:\n self.output.info(f\"chmod on script file: '{filename}'\")\n self._chmod_plus_x(filename)\n elif sig == [0x7F, 0x45, 0x4C, 0x46]:\n self.output.info(f\"chmod on ELF file: '{filename}'\")\n self._chmod_plus_x(filename)\n elif sig == [0xCA, 0xFE, 0xBA, 0xBE] or \\\n sig == [0xBE, 0xBA, 0xFE, 0xCA] or \\\n sig == [0xFE, 0xED, 0xFA, 0xCF] or \\\n sig == [0xCF, 0xFA, 0xED, 0xFE] or \\\n sig == [0xFE, 0xEF, 0xFA, 0xCE] or \\\n sig == [0xCE, 0xFA, 0xED, 0xFE]:\n self.output.info(f\"chmod on Mach-O file: '{filename}'\")\n self._chmod_plus_x(filename)\n\n @property\n def _host(self):\n return f\"{self._platform}_{self.settings.arch}\"\n\n @property\n def _ndk_root(self):\n return os.path.join(self.package_folder, \"toolchains\", \"llvm\", \"prebuilt\", self._host)\n\n def _tool_name(self, tool):\n if \"clang\" in tool:\n suffix = \".cmd\" if self.settings_build.os == \"Windows\" else \"\"\n return f\"{self._clang_triplet}{self.settings_target.os.api_level}-{tool}{suffix}\"\n else:\n suffix = \".exe\" if self.settings_build.os == \"Windows\" else \"\"\n return f\"{self._llvm_triplet}-{tool}{suffix}\"\n\n @property\n def _cmake_system_processor(self):\n cmake_system_processor = {\n \"x86_64\": \"x86_64\",\n \"x86\": \"i686\",\n \"mips\": \"mips\",\n \"mips64\": \"mips64\",\n }.get(str(self.settings.arch))\n if self.settings_target.arch == \"armv8\":\n cmake_system_processor = \"aarch64\"\n elif \"armv7\" in str(self.settings.arch):\n cmake_system_processor = \"armv7-a\"\n elif \"armv6\" in str(self.settings.arch):\n cmake_system_processor = \"armv6\"\n elif \"armv5\" in str(self.settings.arch):\n cmake_system_processor = \"armv5te\"\n return cmake_system_processor\n\n def _define_tool_var(self, name, value):\n ndk_bin = os.path.join(self._ndk_root, \"bin\")\n path = os.path.join(ndk_bin, self._tool_name(value))\n self.output.info(f\"Creating {name} environment variable: {path}\")\n return path\n\n @staticmethod\n def _chmod_plus_x(filename):\n if os.name == \"posix\":\n os.chmod(filename, os.stat(filename).st_mode | 0o111)\n\n def package_info(self):\n # test shall pass, so this runs also in the build as build requirement context\n # ndk-build: https://developer.android.com/ndk/guides/ndk-build\n self.env_info.PATH.append(self.package_folder)\n\n # You should use the ANDROID_NDK_ROOT environment variable to indicate where the NDK is located.\n # That's what most NDK-related scripts use (inside the NDK, and outside of it).\n # https://groups.google.com/g/android-ndk/c/qZjhOaynHXc\n self.output.info(f\"Creating ANDROID_NDK_ROOT environment variable: {self.package_folder}\")\n self.env_info.ANDROID_NDK_ROOT = self.package_folder\n\n self.output.info(f\"Creating ANDROID_NDK_HOME environment variable: {self.package_folder}\")\n self.env_info.ANDROID_NDK_HOME = self.package_folder\n\n # this is not enough, I can kill that .....\n if not hasattr(self, \"settings_target\"):\n 
return\n\n # interestingly I can reach that with\n # conan test --profile:build nsdk-default --profile:host default /Users/a4z/elux/conan/myrecipes/android-ndk/all/test_package android-ndk/r21d@\n if self.settings_target is None:\n return\n\n # And if we are not building for Android, why bother at all\n if not self.settings_target.os == \"Android\":\n self.output.warn(f\"You've added {self.name}/{self.version} as a build requirement, while os={self.settings_targe.os} != Android\")\n return\n\n cmake_system_processor = self._cmake_system_processor\n if cmake_system_processor:\n self.output.info(f\"Creating CONAN_CMAKE_SYSTEM_PROCESSOR environment variable: {cmake_system_processor}\")\n self.env_info.CONAN_CMAKE_SYSTEM_PROCESSOR = cmake_system_processor\n else:\n self.output.warn(\"Could not find a valid CMAKE_SYSTEM_PROCESSOR variable, supported by CMake\")\n\n self.output.info(f\"Creating NDK_ROOT environment variable: {self._ndk_root}\")\n self.env_info.NDK_ROOT = self._ndk_root\n\n self.output.info(f\"Creating CHOST environment variable: {self._llvm_triplet}\")\n self.env_info.CHOST = self._llvm_triplet\n\n ndk_sysroot = os.path.join(self._ndk_root, \"sysroot\")\n self.output.info(f\"Creating CONAN_CMAKE_FIND_ROOT_PATH environment variable: {ndk_sysroot}\")\n self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = ndk_sysroot\n\n self.output.info(f\"Creating SYSROOT environment variable: {ndk_sysroot}\")\n self.env_info.SYSROOT = ndk_sysroot\n\n self.output.info(f\"Creating self.cpp_info.sysroot: {ndk_sysroot}\")\n self.cpp_info.sysroot = ndk_sysroot\n\n self.output.info(f\"Creating ANDROID_NATIVE_API_LEVEL environment variable: {self.settings_target.os.api_level}\")\n self.env_info.ANDROID_NATIVE_API_LEVEL = str(self.settings_target.os.api_level)\n\n self._chmod_plus_x(os.path.join(self.package_folder, \"cmake-wrapper\"))\n cmake_wrapper = \"cmake-wrapper.cmd\" if self.settings.os == \"Windows\" else \"cmake-wrapper\"\n cmake_wrapper = os.path.join(self.package_folder, cmake_wrapper)\n self.output.info(f\"Creating CONAN_CMAKE_PROGRAM environment variable: {cmake_wrapper}\")\n self.env_info.CONAN_CMAKE_PROGRAM = cmake_wrapper\n\n toolchain = os.path.join(self.package_folder, \"build\", \"cmake\", \"android.toolchain.cmake\")\n self.output.info(f\"Creating CONAN_CMAKE_TOOLCHAIN_FILE environment variable: {toolchain}\")\n self.env_info.CONAN_CMAKE_TOOLCHAIN_FILE = toolchain\n\n self.env_info.CC = self._define_tool_var(\"CC\", \"clang\")\n self.env_info.CXX = self._define_tool_var(\"CXX\", \"clang++\")\n self.env_info.LD = self._define_tool_var(\"LD\", \"ld\")\n self.env_info.AR = self._define_tool_var(\"AR\", \"ar\")\n self.env_info.AS = self._define_tool_var(\"AS\", \"as\")\n self.env_info.RANLIB = self._define_tool_var(\"RANLIB\", \"ranlib\")\n self.env_info.STRIP = self._define_tool_var(\"STRIP\", \"strip\")\n self.env_info.ADDR2LINE = self._define_tool_var(\"ADDR2LINE\", \"addr2line\")\n self.env_info.NM = self._define_tool_var(\"NM\", \"nm\")\n self.env_info.OBJCOPY = self._define_tool_var(\"OBJCOPY\", \"objcopy\")\n self.env_info.OBJDUMP = self._define_tool_var(\"OBJDUMP\", \"objdump\")\n self.env_info.READELF = self._define_tool_var(\"READELF\", \"readelf\")\n self.env_info.ELFEDIT = self._define_tool_var(\"ELFEDIT\", \"elfedit\")\n\n self.env_info.ANDROID_PLATFORM = f\"android-{self.settings_target.os.api_level}\"\n self.env_info.ANDROID_TOOLCHAIN = \"clang\"\n self.env_info.ANDROID_ABI = self._android_abi\n libcxx_str = str(self.settings_target.compiler.libcxx)\n self.env_info.ANDROID_STL = 
libcxx_str if libcxx_str.startswith(\"c++_\") else \"c++_shared\"\n\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PROGRAM = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = \"BOTH\"\n", "path": "recipes/android-ndk/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport shutil\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass AndroidNDKConan(ConanFile):\n name = \"android-ndk\"\n description = \"The Android NDK is a toolset that lets you implement parts of your app in native code, using languages such as C and C++\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://developer.android.com/ndk/\"\n topics = (\"android\", \"NDK\", \"toolchain\", \"compiler\")\n license = \"Apache-2.0\"\n\n settings = \"os\", \"arch\"\n\n short_paths = True\n exports_sources = \"cmake-wrapper.cmd\", \"cmake-wrapper\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def _settings_os_supported(self):\n return self.conan_data[\"sources\"][self.version][\"url\"].get(str(self.settings.os)) is not None\n\n def _settings_arch_supported(self):\n return self.conan_data[\"sources\"][self.version][\"url\"].get(str(self.settings.os), {}).get(str(self.settings.arch)) is not None\n\n def validate(self):\n if not self._settings_os_supported():\n raise ConanInvalidConfiguration(f\"os={self.settings.os} is not supported by {self.name} (no binaries are available)\")\n if not self._settings_arch_supported():\n raise ConanInvalidConfiguration(f\"os,arch={self.settings.os},{self.settings.arch} is not supported by {self.name} (no binaries are available)\")\n\n def build(self):\n if self.version in ['r23']:\n data = self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)]\n unzip_fix_symlinks(url=data[\"url\"], target_folder=self._source_subfolder, sha256=data[\"sha256\"])\n else:\n tools.get(**self.conan_data[\"sources\"][self.version][\"url\"][str(self.settings.os)][str(self.settings.arch)],\n destination=self._source_subfolder, strip_root=True)\n\n def package(self):\n self.copy(\"*\", src=self._source_subfolder, dst=\".\", keep_path=True, symlinks=True)\n self.copy(\"*NOTICE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"*NOTICE.toolchain\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(\"cmake-wrapper.cmd\")\n self.copy(\"cmake-wrapper\")\n self._fix_permissions()\n\n # from here on, everything is assumed to run in 2 profile mode, using this android-ndk recipe as a build requirement\n\n @property\n def _platform(self):\n return {\n \"Linux\": \"linux\",\n \"Macos\": \"darwin\",\n \"Windows\": \"windows\",\n }.get(str(self.settings_build.os))\n\n @property\n def _android_abi(self):\n return {\n \"armv7\": \"armeabi-v7a\",\n \"armv8\": \"arm64-v8a\",\n \"x86\": \"x86\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings_target.arch))\n\n @property\n def _llvm_triplet(self):\n arch = {\n \"armv7\": \"arm\",\n \"armv8\": \"aarch64\",\n \"x86\": \"i686\",\n \"x86_64\": \"x86_64\",\n }.get(str(self.settings_target.arch))\n abi = \"androideabi\" if self.settings_target.arch == \"armv7\" else \"android\"\n return f\"{arch}-linux-{abi}\"\n\n @property\n def _clang_triplet(self):\n arch = {\n \"armv7\": \"armv7a\",\n \"armv8\": \"aarch64\",\n \"x86\": \"i686\",\n \"x86_64\": 
\"x86_64\",\n }.get(str(self.settings_target.arch))\n abi = \"androideabi\" if self.settings_target.arch == \"armv7\" else \"android\"\n return f\"{arch}-linux-{abi}\"\n\n def _fix_permissions(self):\n if os.name != \"posix\":\n return\n for root, _, files in os.walk(self.package_folder):\n for filename in files:\n filename = os.path.join(root, filename)\n with open(filename, \"rb\") as f:\n sig = f.read(4)\n if type(sig) is str:\n sig = [ord(s) for s in sig]\n else:\n sig = [s for s in sig]\n if len(sig) > 2 and sig[0] == 0x23 and sig[1] == 0x21:\n self.output.info(f\"chmod on script file: '{filename}'\")\n self._chmod_plus_x(filename)\n elif sig == [0x7F, 0x45, 0x4C, 0x46]:\n self.output.info(f\"chmod on ELF file: '{filename}'\")\n self._chmod_plus_x(filename)\n elif sig == [0xCA, 0xFE, 0xBA, 0xBE] or \\\n sig == [0xBE, 0xBA, 0xFE, 0xCA] or \\\n sig == [0xFE, 0xED, 0xFA, 0xCF] or \\\n sig == [0xCF, 0xFA, 0xED, 0xFE] or \\\n sig == [0xFE, 0xEF, 0xFA, 0xCE] or \\\n sig == [0xCE, 0xFA, 0xED, 0xFE]:\n self.output.info(f\"chmod on Mach-O file: '{filename}'\")\n self._chmod_plus_x(filename)\n\n @property\n def _host(self):\n return f\"{self._platform}-{self.settings.arch}\"\n\n @property\n def _ndk_root(self):\n return os.path.join(self.package_folder, \"toolchains\", \"llvm\", \"prebuilt\", self._host)\n\n def _tool_name(self, tool):\n if \"clang\" in tool:\n suffix = \".cmd\" if self.settings_build.os == \"Windows\" else \"\"\n return f\"{self._clang_triplet}{self.settings_target.os.api_level}-{tool}{suffix}\"\n else:\n suffix = \".exe\" if self.settings_build.os == \"Windows\" else \"\"\n return f\"{self._llvm_triplet}-{tool}{suffix}\"\n\n @property\n def _cmake_system_processor(self):\n cmake_system_processor = {\n \"x86_64\": \"x86_64\",\n \"x86\": \"i686\",\n \"mips\": \"mips\",\n \"mips64\": \"mips64\",\n }.get(str(self.settings.arch))\n if self.settings_target.arch == \"armv8\":\n cmake_system_processor = \"aarch64\"\n elif \"armv7\" in str(self.settings.arch):\n cmake_system_processor = \"armv7-a\"\n elif \"armv6\" in str(self.settings.arch):\n cmake_system_processor = \"armv6\"\n elif \"armv5\" in str(self.settings.arch):\n cmake_system_processor = \"armv5te\"\n return cmake_system_processor\n\n def _define_tool_var(self, name, value):\n ndk_bin = os.path.join(self._ndk_root, \"bin\")\n path = os.path.join(ndk_bin, self._tool_name(value))\n self.output.info(f\"Creating {name} environment variable: {path}\")\n return path\n\n @staticmethod\n def _chmod_plus_x(filename):\n if os.name == \"posix\":\n os.chmod(filename, os.stat(filename).st_mode | 0o111)\n\n def package_info(self):\n # test shall pass, so this runs also in the build as build requirement context\n # ndk-build: https://developer.android.com/ndk/guides/ndk-build\n self.env_info.PATH.append(self.package_folder)\n\n # You should use the ANDROID_NDK_ROOT environment variable to indicate where the NDK is located.\n # That's what most NDK-related scripts use (inside the NDK, and outside of it).\n # https://groups.google.com/g/android-ndk/c/qZjhOaynHXc\n self.output.info(f\"Creating ANDROID_NDK_ROOT environment variable: {self.package_folder}\")\n self.env_info.ANDROID_NDK_ROOT = self.package_folder\n\n self.output.info(f\"Creating ANDROID_NDK_HOME environment variable: {self.package_folder}\")\n self.env_info.ANDROID_NDK_HOME = self.package_folder\n\n # this is not enough, I can kill that .....\n if not hasattr(self, \"settings_target\"):\n return\n\n # interestingly I can reach that with\n # conan test --profile:build 
nsdk-default --profile:host default /Users/a4z/elux/conan/myrecipes/android-ndk/all/test_package android-ndk/r21d@\n if self.settings_target is None:\n return\n\n # And if we are not building for Android, why bother at all\n if not self.settings_target.os == \"Android\":\n self.output.warn(f\"You've added {self.name}/{self.version} as a build requirement, while os={self.settings_targe.os} != Android\")\n return\n\n cmake_system_processor = self._cmake_system_processor\n if cmake_system_processor:\n self.output.info(f\"Creating CONAN_CMAKE_SYSTEM_PROCESSOR environment variable: {cmake_system_processor}\")\n self.env_info.CONAN_CMAKE_SYSTEM_PROCESSOR = cmake_system_processor\n else:\n self.output.warn(\"Could not find a valid CMAKE_SYSTEM_PROCESSOR variable, supported by CMake\")\n\n self.output.info(f\"Creating NDK_ROOT environment variable: {self._ndk_root}\")\n self.env_info.NDK_ROOT = self._ndk_root\n\n self.output.info(f\"Creating CHOST environment variable: {self._llvm_triplet}\")\n self.env_info.CHOST = self._llvm_triplet\n\n ndk_sysroot = os.path.join(self._ndk_root, \"sysroot\")\n self.output.info(f\"Creating CONAN_CMAKE_FIND_ROOT_PATH environment variable: {ndk_sysroot}\")\n self.env_info.CONAN_CMAKE_FIND_ROOT_PATH = ndk_sysroot\n\n self.output.info(f\"Creating SYSROOT environment variable: {ndk_sysroot}\")\n self.env_info.SYSROOT = ndk_sysroot\n\n self.output.info(f\"Creating self.cpp_info.sysroot: {ndk_sysroot}\")\n self.cpp_info.sysroot = ndk_sysroot\n\n self.output.info(f\"Creating ANDROID_NATIVE_API_LEVEL environment variable: {self.settings_target.os.api_level}\")\n self.env_info.ANDROID_NATIVE_API_LEVEL = str(self.settings_target.os.api_level)\n\n self._chmod_plus_x(os.path.join(self.package_folder, \"cmake-wrapper\"))\n cmake_wrapper = \"cmake-wrapper.cmd\" if self.settings.os == \"Windows\" else \"cmake-wrapper\"\n cmake_wrapper = os.path.join(self.package_folder, cmake_wrapper)\n self.output.info(f\"Creating CONAN_CMAKE_PROGRAM environment variable: {cmake_wrapper}\")\n self.env_info.CONAN_CMAKE_PROGRAM = cmake_wrapper\n\n toolchain = os.path.join(self.package_folder, \"build\", \"cmake\", \"android.toolchain.cmake\")\n self.output.info(f\"Creating CONAN_CMAKE_TOOLCHAIN_FILE environment variable: {toolchain}\")\n self.env_info.CONAN_CMAKE_TOOLCHAIN_FILE = toolchain\n\n self.env_info.CC = self._define_tool_var(\"CC\", \"clang\")\n self.env_info.CXX = self._define_tool_var(\"CXX\", \"clang++\")\n self.env_info.LD = self._define_tool_var(\"LD\", \"ld\")\n self.env_info.AR = self._define_tool_var(\"AR\", \"ar\")\n self.env_info.AS = self._define_tool_var(\"AS\", \"as\")\n self.env_info.RANLIB = self._define_tool_var(\"RANLIB\", \"ranlib\")\n self.env_info.STRIP = self._define_tool_var(\"STRIP\", \"strip\")\n self.env_info.ADDR2LINE = self._define_tool_var(\"ADDR2LINE\", \"addr2line\")\n self.env_info.NM = self._define_tool_var(\"NM\", \"nm\")\n self.env_info.OBJCOPY = self._define_tool_var(\"OBJCOPY\", \"objcopy\")\n self.env_info.OBJDUMP = self._define_tool_var(\"OBJDUMP\", \"objdump\")\n self.env_info.READELF = self._define_tool_var(\"READELF\", \"readelf\")\n self.env_info.ELFEDIT = self._define_tool_var(\"ELFEDIT\", \"elfedit\")\n\n self.env_info.ANDROID_PLATFORM = f\"android-{self.settings_target.os.api_level}\"\n self.env_info.ANDROID_TOOLCHAIN = \"clang\"\n self.env_info.ANDROID_ABI = self._android_abi\n libcxx_str = str(self.settings_target.compiler.libcxx)\n self.env_info.ANDROID_STL = libcxx_str if libcxx_str.startswith(\"c++_\") else \"c++_shared\"\n\n 
self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PROGRAM = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_LIBRARY = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_INCLUDE = \"BOTH\"\n self.env_info.CMAKE_FIND_ROOT_PATH_MODE_PACKAGE = \"BOTH\"\n\n\ndef unzip_fix_symlinks(url, target_folder, sha256):\n # Python's built-in module 'zipfile' won't handle symlinks (https://bugs.python.org/issue37921)\n # Most of the logic borrowed from this PR https://github.com/conan-io/conan/pull/8100\n\n filename = \"android_sdk.zip\"\n tools.download(url, filename)\n tools.check_sha256(filename, sha256)\n tools.unzip(filename, destination=target_folder, strip_root=True)\n\n def is_symlink_zipinfo(zi):\n return (zi.external_attr >> 28) == 0xA\n\n full_path = os.path.normpath(target_folder)\n import zipfile\n with zipfile.ZipFile(filename, \"r\") as z:\n zip_info = z.infolist()\n \n names = [n.replace(\"\\\\\", \"/\") for n in z.namelist()]\n common_folder = os.path.commonprefix(names).split(\"/\", 1)[0]\n\n for file_ in zip_info:\n if is_symlink_zipinfo(file_):\n rel_path = os.path.relpath(file_.filename, common_folder)\n full_name = os.path.join(full_path, rel_path)\n target = tools.load(full_name)\n os.unlink(full_name)\n\n try:\n os.symlink(target, full_name)\n except OSError:\n if not os.path.isabs(target):\n target = os.path.normpath(os.path.join(os.path.dirname(full_name), target))\n shutil.copy2(target, full_name)\n\n os.unlink(filename)\n", "path": "recipes/android-ndk/all/conanfile.py"}]} | 4,024 | 977 |
gh_patches_debug_22523 | rasdani/github-patches | git_diff | borgbackup__borg-1207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: acl_access value must be surrogate-escaped str or None
`acl_get` in `platform/linux.pyx` returns values of type `bytes` for `acl_access`, `acl_default`, `acl_extended`, and `acl_nfs4` on Linux x86_64. Those `bytes` values are not converted to `str` afterwards.
Relevant stacktrace:
```
File "/usr/lib/python3.5/site-packages/borg/archiver.py", line 332, in _process
status = archive.process_file(path, st, cache, self.ignore_inode)
File "/usr/lib/python3.5/site-packages/borg/archive.py", line 669, in process_file
item.update(self.stat_attrs(st, path))
File "/usr/lib/python3.5/site-packages/borg/item.py", line 40, in update
setattr(self, self._check_key(k), v)
File "/usr/lib/python3.5/site-packages/borg/item.py", line 95, in _set
raise TypeError(type_error_msg)
TypeError: acl_access value must be surrogate-escaped str or None
```
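A minimal sketch of one way the type mismatch could be reconciled — decoding the platform layer's `bytes` with `surrogateescape` before they reach `Item`. This is only an illustration of the mismatch; the actual fix could just as well relax the `acl_*` properties to accept `bytes` instead:

```python
def acl_bytes_to_str(value):
    # surrogateescape round-trips arbitrary bytes through str, matching the
    # "surrogate-escaped str" wording in the TypeError above
    if isinstance(value, bytes):
        return value.decode('utf-8', 'surrogateescape')
    return value

# hypothetical example of what stat_attrs() hands to Item.update()
attrs = {'acl_access': b'user::rw-\ngroup::r--\nother::r--\n'}
attrs = {k: acl_bytes_to_str(v) for k, v in attrs.items()}
```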
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/borg/item.py`
Content:
```
1 from .constants import ITEM_KEYS
2 from .helpers import safe_encode, safe_decode
3 from .helpers import bigint_to_int, int_to_bigint
4 from .helpers import StableDict
5
6
7 class PropDict:
8 """
9 Manage a dictionary via properties.
10
11 - initialization by giving a dict or kw args
12 - on initialization, normalize dict keys to be str type
13 - access dict via properties, like: x.key_name
14 - membership check via: 'key_name' in x
15 - optionally, encode when setting a value
16 - optionally, decode when getting a value
17 - be safe against typos in key names: check against VALID_KEYS
18 - when setting a value: check type of value
19 """
20 VALID_KEYS = None # override with <set of str> in child class
21
22 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
23
24 def __init__(self, data_dict=None, internal_dict=None, **kw):
25 if data_dict is None:
26 data = kw
27 elif not isinstance(data_dict, dict):
28 raise TypeError("data_dict must be dict")
29 else:
30 data = data_dict
31 self._dict = {}
32 self.update_internal(internal_dict or {})
33 self.update(data)
34
35 def update(self, d):
36 for k, v in d.items():
37 if isinstance(k, bytes):
38 k = k.decode()
39 setattr(self, self._check_key(k), v)
40
41 def update_internal(self, d):
42 for k, v in d.items():
43 if isinstance(k, bytes):
44 k = k.decode()
45 self._dict[k] = v
46
47 def __eq__(self, other):
48 return self.as_dict() == other.as_dict()
49
50 def __repr__(self):
51 return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict)
52
53 def as_dict(self):
54 """return the internal dictionary"""
55 return StableDict(self._dict)
56
57 def _check_key(self, key):
58 """make sure key is of type str and known"""
59 if not isinstance(key, str):
60 raise TypeError("key must be str")
61 if key not in self.VALID_KEYS:
62 raise ValueError("key '%s' is not a valid key" % key)
63 return key
64
65 def __contains__(self, key):
66 """do we have this key?"""
67 return self._check_key(key) in self._dict
68
69 def get(self, key, default=None):
70 """get value for key, return default if key does not exist"""
71 return getattr(self, self._check_key(key), default)
72
73 @staticmethod
74 def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):
75 """return a property that deals with self._dict[key]"""
76 assert isinstance(key, str)
77 if value_type_name is None:
78 value_type_name = value_type.__name__
79 doc = "%s (%s)" % (key, value_type_name)
80 type_error_msg = "%s value must be %s" % (key, value_type_name)
81 attr_error_msg = "attribute %s not found" % key
82
83 def _get(self):
84 try:
85 value = self._dict[key]
86 except KeyError:
87 raise AttributeError(attr_error_msg) from None
88 if decode is not None:
89 value = decode(value)
90 return value
91
92 def _set(self, value):
93 if not isinstance(value, value_type):
94 raise TypeError(type_error_msg)
95 if encode is not None:
96 value = encode(value)
97 self._dict[key] = value
98
99 def _del(self):
100 try:
101 del self._dict[key]
102 except KeyError:
103 raise AttributeError(attr_error_msg) from None
104
105 return property(_get, _set, _del, doc=doc)
106
107
108 class Item(PropDict):
109 """
110 Item abstraction that deals with validation and the low-level details internally:
111
112 Items are created either from msgpack unpacker output, from another dict, from kwargs or
113 built step-by-step by setting attributes.
114
115 msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.
116 msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting
117 such a property and encode when setting it.
118
119 If an Item shall be serialized, give as_dict() method output to msgpack packer.
120 """
121
122 VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys
123
124 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
125
126 # properties statically defined, so that IDEs can know their names:
127
128 path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
129 source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
130 user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
131 group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
132
133 acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
134 acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
135 acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
136 acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
137
138 mode = PropDict._make_property('mode', int)
139 uid = PropDict._make_property('uid', int)
140 gid = PropDict._make_property('gid', int)
141 rdev = PropDict._make_property('rdev', int)
142 bsdflags = PropDict._make_property('bsdflags', int)
143
144 atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
145 ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
146 mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
147
148 hardlink_master = PropDict._make_property('hardlink_master', bool)
149
150 chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None')
151
152 xattrs = PropDict._make_property('xattrs', StableDict)
153
154 deleted = PropDict._make_property('deleted', bool)
155 nlink = PropDict._make_property('nlink', int)
156
157
158 class EncryptedKey(PropDict):
159 """
160 EncryptedKey abstraction that deals with validation and the low-level details internally:
161
162 A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or
163 built step-by-step by setting attributes.
164
165 msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.
166
167 If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.
168 """
169
170 VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys
171
172 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
173
174 version = PropDict._make_property('version', int)
175 algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode)
176 iterations = PropDict._make_property('iterations', int)
177 salt = PropDict._make_property('salt', bytes)
178 hash = PropDict._make_property('hash', bytes)
179 data = PropDict._make_property('data', bytes)
180
181
182 class Key(PropDict):
183 """
184 Key abstraction that deals with validation and the low-level details internally:
185
186 A Key is created either from msgpack unpacker output, from another dict, from kwargs or
187 built step-by-step by setting attributes.
188
189 msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.
190
191 If a Key shall be serialized, give as_dict() method output to msgpack packer.
192 """
193
194 VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'} # str-typed keys
195
196 __slots__ = ("_dict", ) # avoid setting attributes not supported by properties
197
198 version = PropDict._make_property('version', int)
199 repository_id = PropDict._make_property('repository_id', bytes)
200 enc_key = PropDict._make_property('enc_key', bytes)
201 enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes)
202 id_key = PropDict._make_property('id_key', bytes)
203 chunk_seed = PropDict._make_property('chunk_seed', int)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/borg/item.py b/src/borg/item.py
--- a/src/borg/item.py
+++ b/src/borg/item.py
@@ -130,10 +130,10 @@
user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
- acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
- acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
+ acl_access = PropDict._make_property('acl_access', bytes)
+ acl_default = PropDict._make_property('acl_default', bytes)
+ acl_extended = PropDict._make_property('acl_extended', bytes)
+ acl_nfs4 = PropDict._make_property('acl_nfs4', bytes)
mode = PropDict._make_property('mode', int)
uid = PropDict._make_property('uid', int)
| {"golden_diff": "diff --git a/src/borg/item.py b/src/borg/item.py\n--- a/src/borg/item.py\n+++ b/src/borg/item.py\n@@ -130,10 +130,10 @@\n user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n \n- acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n- acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n+ acl_access = PropDict._make_property('acl_access', bytes)\n+ acl_default = PropDict._make_property('acl_default', bytes)\n+ acl_extended = PropDict._make_property('acl_extended', bytes)\n+ acl_nfs4 = PropDict._make_property('acl_nfs4', bytes)\n \n mode = PropDict._make_property('mode', int)\n uid = PropDict._make_property('uid', int)\n", "issue": "TypeError: acl_access value must be surrogate-escaped str or None\n`acl_get` in `platform/linux.pyx` returns type of `bytes` for `acl_access`, `acl_default`, `acl_extended`, `acl_nfs4` on Linux x86_64. Those `bytes` are not converted to `str` afterwards.\n\nRelevant stacktrace:\n\n```\nFile \"/usr/lib/python3.5/site-packages/borg/archiver.py\", line 332, in _process\nstatus = archive.process_file(path, st, cache, self.ignore_inode)\nFile \"/usr/lib/python3.5/site-packages/borg/archive.py\", line 669, in process_file\nitem.update(self.stat_attrs(st, path))\nFile \"/usr/lib/python3.5/site-packages/borg/item.py\", line 40, in update\nsetattr(self, self._check_key(k), v)\nFile \"/usr/lib/python3.5/site-packages/borg/item.py\", line 95, in _set\nraise TypeError(type_error_msg)\nTypeError: acl_access value must be surrogate-escaped str or None\n```\n\n", "before_files": [{"content": "from .constants import ITEM_KEYS\nfrom .helpers import safe_encode, safe_decode\nfrom .helpers import bigint_to_int, int_to_bigint\nfrom .helpers import StableDict\n\n\nclass PropDict:\n \"\"\"\n Manage a dictionary via properties.\n\n - initialization by giving a dict or kw args\n - on initialization, normalize dict keys to be str type\n - access dict via properties, like: x.key_name\n - membership check via: 'key_name' in x\n - optionally, encode when setting a value\n - optionally, decode when getting a value\n - be safe against typos in key names: check against VALID_KEYS\n - when setting a value: check type of value\n \"\"\"\n VALID_KEYS = None # override with <set of str> in child class\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n def __init__(self, data_dict=None, internal_dict=None, **kw):\n if data_dict is None:\n data = kw\n elif not isinstance(data_dict, dict):\n raise TypeError(\"data_dict must be dict\")\n else:\n data = data_dict\n self._dict = {}\n self.update_internal(internal_dict or {})\n self.update(data)\n\n def update(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n setattr(self, self._check_key(k), v)\n\n def update_internal(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n self._dict[k] = v\n\n def __eq__(self, other):\n return self.as_dict() == 
other.as_dict()\n\n def __repr__(self):\n return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict)\n\n def as_dict(self):\n \"\"\"return the internal dictionary\"\"\"\n return StableDict(self._dict)\n\n def _check_key(self, key):\n \"\"\"make sure key is of type str and known\"\"\"\n if not isinstance(key, str):\n raise TypeError(\"key must be str\")\n if key not in self.VALID_KEYS:\n raise ValueError(\"key '%s' is not a valid key\" % key)\n return key\n\n def __contains__(self, key):\n \"\"\"do we have this key?\"\"\"\n return self._check_key(key) in self._dict\n\n def get(self, key, default=None):\n \"\"\"get value for key, return default if key does not exist\"\"\"\n return getattr(self, self._check_key(key), default)\n\n @staticmethod\n def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):\n \"\"\"return a property that deals with self._dict[key]\"\"\"\n assert isinstance(key, str)\n if value_type_name is None:\n value_type_name = value_type.__name__\n doc = \"%s (%s)\" % (key, value_type_name)\n type_error_msg = \"%s value must be %s\" % (key, value_type_name)\n attr_error_msg = \"attribute %s not found\" % key\n\n def _get(self):\n try:\n value = self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n if decode is not None:\n value = decode(value)\n return value\n\n def _set(self, value):\n if not isinstance(value, value_type):\n raise TypeError(type_error_msg)\n if encode is not None:\n value = encode(value)\n self._dict[key] = value\n\n def _del(self):\n try:\n del self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n\n return property(_get, _set, _del, doc=doc)\n\n\nclass Item(PropDict):\n \"\"\"\n Item abstraction that deals with validation and the low-level details internally:\n\n Items are created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.\n msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting\n such a property and encode when setting it.\n\n If an Item shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n # properties statically defined, so that IDEs can know their names:\n\n path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n\n acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n\n mode = 
PropDict._make_property('mode', int)\n uid = PropDict._make_property('uid', int)\n gid = PropDict._make_property('gid', int)\n rdev = PropDict._make_property('rdev', int)\n bsdflags = PropDict._make_property('bsdflags', int)\n\n atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n\n hardlink_master = PropDict._make_property('hardlink_master', bool)\n\n chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None')\n\n xattrs = PropDict._make_property('xattrs', StableDict)\n\n deleted = PropDict._make_property('deleted', bool)\n nlink = PropDict._make_property('nlink', int)\n\n\nclass EncryptedKey(PropDict):\n \"\"\"\n EncryptedKey abstraction that deals with validation and the low-level details internally:\n\n A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.\n\n If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode)\n iterations = PropDict._make_property('iterations', int)\n salt = PropDict._make_property('salt', bytes)\n hash = PropDict._make_property('hash', bytes)\n data = PropDict._make_property('data', bytes)\n\n\nclass Key(PropDict):\n \"\"\"\n Key abstraction that deals with validation and the low-level details internally:\n\n A Key is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.\n\n If a Key shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n repository_id = PropDict._make_property('repository_id', bytes)\n enc_key = PropDict._make_property('enc_key', bytes)\n enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes)\n id_key = PropDict._make_property('id_key', bytes)\n chunk_seed = PropDict._make_property('chunk_seed', int)\n", "path": "src/borg/item.py"}], "after_files": [{"content": "from .constants import ITEM_KEYS\nfrom .helpers import safe_encode, safe_decode\nfrom .helpers import bigint_to_int, int_to_bigint\nfrom .helpers import StableDict\n\n\nclass PropDict:\n \"\"\"\n Manage a dictionary via properties.\n\n - initialization by giving a dict or kw args\n - on initialization, normalize dict keys to be str type\n - access dict via properties, like: x.key_name\n - membership check via: 'key_name' in x\n - optionally, encode when setting a value\n - optionally, decode when getting a value\n - be safe against typos in key names: check against VALID_KEYS\n - when setting a value: check type of 
value\n \"\"\"\n VALID_KEYS = None # override with <set of str> in child class\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n def __init__(self, data_dict=None, internal_dict=None, **kw):\n if data_dict is None:\n data = kw\n elif not isinstance(data_dict, dict):\n raise TypeError(\"data_dict must be dict\")\n else:\n data = data_dict\n self._dict = {}\n self.update_internal(internal_dict or {})\n self.update(data)\n\n def update(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n setattr(self, self._check_key(k), v)\n\n def update_internal(self, d):\n for k, v in d.items():\n if isinstance(k, bytes):\n k = k.decode()\n self._dict[k] = v\n\n def __eq__(self, other):\n return self.as_dict() == other.as_dict()\n\n def __repr__(self):\n return '%s(internal_dict=%r)' % (self.__class__.__name__, self._dict)\n\n def as_dict(self):\n \"\"\"return the internal dictionary\"\"\"\n return StableDict(self._dict)\n\n def _check_key(self, key):\n \"\"\"make sure key is of type str and known\"\"\"\n if not isinstance(key, str):\n raise TypeError(\"key must be str\")\n if key not in self.VALID_KEYS:\n raise ValueError(\"key '%s' is not a valid key\" % key)\n return key\n\n def __contains__(self, key):\n \"\"\"do we have this key?\"\"\"\n return self._check_key(key) in self._dict\n\n def get(self, key, default=None):\n \"\"\"get value for key, return default if key does not exist\"\"\"\n return getattr(self, self._check_key(key), default)\n\n @staticmethod\n def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):\n \"\"\"return a property that deals with self._dict[key]\"\"\"\n assert isinstance(key, str)\n if value_type_name is None:\n value_type_name = value_type.__name__\n doc = \"%s (%s)\" % (key, value_type_name)\n type_error_msg = \"%s value must be %s\" % (key, value_type_name)\n attr_error_msg = \"attribute %s not found\" % key\n\n def _get(self):\n try:\n value = self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n if decode is not None:\n value = decode(value)\n return value\n\n def _set(self, value):\n if not isinstance(value, value_type):\n raise TypeError(type_error_msg)\n if encode is not None:\n value = encode(value)\n self._dict[key] = value\n\n def _del(self):\n try:\n del self._dict[key]\n except KeyError:\n raise AttributeError(attr_error_msg) from None\n\n return property(_get, _set, _del, doc=doc)\n\n\nclass Item(PropDict):\n \"\"\"\n Item abstraction that deals with validation and the low-level details internally:\n\n Items are created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.\n msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting\n such a property and encode when setting it.\n\n If an Item shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = ITEM_KEYS | {'deleted', 'nlink', } # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n # properties statically defined, so that IDEs can know their names:\n\n path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)\n user = 
PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)\n\n acl_access = PropDict._make_property('acl_access', bytes)\n acl_default = PropDict._make_property('acl_default', bytes)\n acl_extended = PropDict._make_property('acl_extended', bytes)\n acl_nfs4 = PropDict._make_property('acl_nfs4', bytes)\n\n mode = PropDict._make_property('mode', int)\n uid = PropDict._make_property('uid', int)\n gid = PropDict._make_property('gid', int)\n rdev = PropDict._make_property('rdev', int)\n bsdflags = PropDict._make_property('bsdflags', int)\n\n atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)\n\n hardlink_master = PropDict._make_property('hardlink_master', bool)\n\n chunks = PropDict._make_property('chunks', (list, type(None)), 'list or None')\n\n xattrs = PropDict._make_property('xattrs', StableDict)\n\n deleted = PropDict._make_property('deleted', bool)\n nlink = PropDict._make_property('nlink', int)\n\n\nclass EncryptedKey(PropDict):\n \"\"\"\n EncryptedKey abstraction that deals with validation and the low-level details internally:\n\n A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.\n\n If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n algorithm = PropDict._make_property('algorithm', str, encode=str.encode, decode=bytes.decode)\n iterations = PropDict._make_property('iterations', int)\n salt = PropDict._make_property('salt', bytes)\n hash = PropDict._make_property('hash', bytes)\n data = PropDict._make_property('data', bytes)\n\n\nclass Key(PropDict):\n \"\"\"\n Key abstraction that deals with validation and the low-level details internally:\n\n A Key is created either from msgpack unpacker output, from another dict, from kwargs or\n built step-by-step by setting attributes.\n\n msgpack gives us a dict with bytes-typed keys, just give it to Key(d) and use key.xxx later.\n\n If a Key shall be serialized, give as_dict() method output to msgpack packer.\n \"\"\"\n\n VALID_KEYS = {'version', 'repository_id', 'enc_key', 'enc_hmac_key', 'id_key', 'chunk_seed'} # str-typed keys\n\n __slots__ = (\"_dict\", ) # avoid setting attributes not supported by properties\n\n version = PropDict._make_property('version', int)\n repository_id = PropDict._make_property('repository_id', bytes)\n enc_key = PropDict._make_property('enc_key', bytes)\n enc_hmac_key = PropDict._make_property('enc_hmac_key', bytes)\n id_key = PropDict._make_property('id_key', bytes)\n chunk_seed = PropDict._make_property('chunk_seed', int)\n", "path": "src/borg/item.py"}]} | 3,044 | 352 |
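
For context on the borg row above: `acl_get()` on Linux hands back raw `bytes` for the `acl_access`/`acl_default`/`acl_extended`/`acl_nfs4` fields, while the four properties were declared with `value_type=str`, so the setter built by `_make_property` raised the `TypeError` quoted in the issue; retyping the properties to `bytes` lets the raw ACL data through unchanged. The snippet below is a minimal, self-contained sketch of that mechanism, not the actual borg module: `make_property` is a trimmed-down stand-in for `PropDict._make_property`, and the `ItemBefore`/`ItemAfter` classes and the sample ACL blob are invented for illustration.

```python
def make_property(key, value_type):
    """Build a property over self._dict[key] that type-checks on assignment."""
    def _get(self):
        return self._dict[key]

    def _set(self, value):
        if not isinstance(value, value_type):
            raise TypeError('%s value must be %s' % (key, value_type.__name__))
        self._dict[key] = value

    return property(_get, _set)


class ItemBefore(object):
    acl_access = make_property('acl_access', str)    # pre-patch: str expected

    def __init__(self):
        self._dict = {}


class ItemAfter(object):
    acl_access = make_property('acl_access', bytes)  # post-patch: raw ACL bytes accepted

    def __init__(self):
        self._dict = {}


raw_acl = b'user::rw-\ngroup::r--\nother::r--\n'      # the kind of value acl_get() returns

try:
    ItemBefore().acl_access = raw_acl                 # bytes vs. str -> TypeError, as in the issue
except TypeError as exc:
    print('before patch:', exc)

item = ItemAfter()
item.acl_access = raw_acl                             # stored unchanged
print('after patch:', item.acl_access)
```

Running it prints the pre-patch `TypeError` first and then the stored bytes value, mirroring the traceback in the issue.
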
gh_patches_debug_17373 | rasdani/github-patches | git_diff | falconry__falcon-559 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing HTTP status codes
There are a few status codes from http://tools.ietf.org/html/rfc6585 that would be great to include.
429 Too Many Requests, for instance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/status_codes.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 HTTP_100 = '100 Continue'
17 HTTP_CONTINUE = HTTP_100
18 HTTP_101 = '101 Switching Protocols'
19 HTTP_SWITCHING_PROTOCOLS = HTTP_101
20
21 HTTP_200 = '200 OK'
22 HTTP_OK = HTTP_200
23 HTTP_201 = '201 Created'
24 HTTP_CREATED = HTTP_201
25 HTTP_202 = '202 Accepted'
26 HTTP_ACCEPTED = HTTP_202
27 HTTP_203 = '203 Non-Authoritative Information'
28 HTTP_NON_AUTHORITATIVE_INFORMATION = HTTP_203
29 HTTP_204 = '204 No Content'
30 HTTP_NO_CONTENT = HTTP_204
31 HTTP_205 = '205 Reset Content'
32 HTTP_RESET_CONTENT = HTTP_205
33 HTTP_206 = '206 Partial Content'
34 HTTP_PARTIAL_CONTENT = HTTP_206
35 HTTP_226 = '226 IM Used'
36 HTTP_IM_USED = HTTP_226
37
38 HTTP_300 = '300 Multiple Choices'
39 HTTP_MULTIPLE_CHOICES = HTTP_300
40 HTTP_301 = '301 Moved Permanently'
41 HTTP_MOVED_PERMANENTLY = HTTP_301
42 HTTP_302 = '302 Found'
43 HTTP_FOUND = HTTP_302
44 HTTP_303 = '303 See Other'
45 HTTP_SEE_OTHER = HTTP_303
46 HTTP_304 = '304 Not Modified'
47 HTTP_NOT_MODIFIED = HTTP_304
48 HTTP_305 = '305 Use Proxy'
49 HTTP_USE_PROXY = HTTP_305
50 HTTP_307 = '307 Temporary Redirect'
51 HTTP_TEMPORARY_REDIRECT = HTTP_307
52
53 HTTP_400 = '400 Bad Request'
54 HTTP_BAD_REQUEST = HTTP_400
55 HTTP_401 = '401 Unauthorized' # <-- Really means "unauthenticated"
56 HTTP_UNAUTHORIZED = HTTP_401
57 HTTP_402 = '402 Payment Required'
58 HTTP_PAYMENT_REQUIRED = HTTP_402
59 HTTP_403 = '403 Forbidden' # <-- Really means "unauthorized"
60 HTTP_FORBIDDEN = HTTP_403
61 HTTP_404 = '404 Not Found'
62 HTTP_NOT_FOUND = HTTP_404
63 HTTP_405 = '405 Method Not Allowed'
64 HTTP_METHOD_NOT_ALLOWED = HTTP_405
65 HTTP_406 = '406 Not Acceptable'
66 HTTP_NOT_ACCEPTABLE = HTTP_406
67 HTTP_407 = '407 Proxy Authentication Required'
68 HTTP_PROXY_AUTHENTICATION_REQUIRED = HTTP_407
69 HTTP_408 = '408 Request Time-out'
70 HTTP_REQUEST_TIMEOUT = HTTP_408
71 HTTP_409 = '409 Conflict'
72 HTTP_CONFLICT = HTTP_409
73 HTTP_410 = '410 Gone'
74 HTTP_GONE = HTTP_410
75 HTTP_411 = '411 Length Required'
76 HTTP_LENGTH_REQUIRED = HTTP_411
77 HTTP_412 = '412 Precondition Failed'
78 HTTP_PRECONDITION_FAILED = HTTP_412
79 HTTP_413 = '413 Payload Too Large'
80 HTTP_REQUEST_ENTITY_TOO_LARGE = HTTP_413
81 HTTP_414 = '414 URI Too Long'
82 HTTP_REQUEST_URI_TOO_LONG = HTTP_414
83 HTTP_415 = '415 Unsupported Media Type'
84 HTTP_UNSUPPORTED_MEDIA_TYPE = HTTP_415
85 HTTP_416 = '416 Range Not Satisfiable'
86 HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416
87 HTTP_417 = '417 Expectation Failed'
88 HTTP_EXPECTATION_FAILED = HTTP_417
89 HTTP_418 = "418 I'm a teapot"
90 HTTP_IM_A_TEAPOT = HTTP_418
91 HTTP_426 = '426 Upgrade Required'
92 HTTP_UPGRADE_REQUIRED = HTTP_426
93
94 HTTP_500 = '500 Internal Server Error'
95 HTTP_INTERNAL_SERVER_ERROR = HTTP_500
96 HTTP_501 = '501 Not Implemented'
97 HTTP_NOT_IMPLEMENTED = HTTP_501
98 HTTP_502 = '502 Bad Gateway'
99 HTTP_BAD_GATEWAY = HTTP_502
100 HTTP_503 = '503 Service Unavailable'
101 HTTP_SERVICE_UNAVAILABLE = HTTP_503
102 HTTP_504 = '504 Gateway Time-out'
103 HTTP_GATEWAY_TIMEOUT = HTTP_504
104 HTTP_505 = '505 HTTP Version not supported'
105 HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505
106
107 # 70X - Inexcusable
108 HTTP_701 = '701 Meh'
109 HTTP_702 = '702 Emacs'
110 HTTP_703 = '703 Explosion'
111
112 # 71X - Novelty Implementations
113 HTTP_710 = '710 PHP'
114 HTTP_711 = '711 Convenience Store'
115 HTTP_712 = '712 NoSQL'
116 HTTP_719 = '719 I am not a teapot'
117
118 # 72X - Edge Cases
119 HTTP_720 = '720 Unpossible'
120 HTTP_721 = '721 Known Unknowns'
121 HTTP_722 = '722 Unknown Unknowns'
122 HTTP_723 = '723 Tricky'
123 HTTP_724 = '724 This line should be unreachable'
124 HTTP_725 = '725 It works on my machine'
125 HTTP_726 = "726 It's a feature, not a bug"
126 HTTP_727 = '727 32 bits is plenty'
127
128 # 74X - Meme Driven
129 HTTP_740 = '740 Computer says no'
130 HTTP_741 = '741 Compiling'
131 HTTP_742 = '742 A kitten dies'
132 HTTP_743 = '743 I thought I knew regular expressions'
133 HTTP_744 = '744 Y U NO write integration tests?'
134 HTTP_745 = ("745 I don't always test my code, but when I do"
135 "I do it in production")
136 HTTP_748 = '748 Confounded by Ponies'
137 HTTP_749 = '749 Reserved for Chuck Norris'
138
139 # 75X - Syntax Errors
140 HTTP_750 = "750 Didn't bother to compile it"
141 HTTP_753 = '753 Syntax Error'
142 HTTP_754 = '754 Too many semi-colons'
143 HTTP_755 = '755 Not enough semi-colons'
144 HTTP_759 = '759 Unexpected T_PAAMAYIM_NEKUDOTAYIM'
145
146 # 77X - Predictable Problems
147 HTTP_771 = '771 Cached for too long'
148 HTTP_772 = '772 Not cached long enough'
149 HTTP_773 = '773 Not cached at all'
150 HTTP_774 = '774 Why was this cached?'
151 HTTP_776 = '776 Error on the Exception'
152 HTTP_777 = '777 Coincidence'
153 HTTP_778 = '778 Off By One Error'
154 HTTP_779 = '779 Off By Too Many To Count Error'
155
156 # 78X - Somebody Else's Problem
157 HTTP_780 = '780 Project owner not responding'
158 HTTP_781 = '781 Operations'
159 HTTP_782 = '782 QA'
160 HTTP_783 = '783 It was a customer request, honestly'
161 HTTP_784 = '784 Management, obviously'
162 HTTP_785 = '785 TPS Cover Sheet not attached'
163 HTTP_786 = '786 Try it now'
164
165 # 79X - Internet crashed
166 HTTP_791 = '791 The Internet shut down due to copyright restrictions'
167 HTTP_792 = '792 Climate change driven catastrophic weather event'
168 HTTP_797 = '797 This is the last page of the Internet. Go back'
169 HTTP_799 = '799 End of the world'
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/status_codes.py b/falcon/status_codes.py
--- a/falcon/status_codes.py
+++ b/falcon/status_codes.py
@@ -90,6 +90,12 @@
HTTP_IM_A_TEAPOT = HTTP_418
HTTP_426 = '426 Upgrade Required'
HTTP_UPGRADE_REQUIRED = HTTP_426
+HTTP_428 = '428 Precondition Required'
+HTTP_PRECONDITION_REQUIRED = HTTP_428
+HTTP_429 = '429 Too Many Requests'
+HTTP_TOO_MANY_REQUESTS = HTTP_429
+HTTP_431 = '431 Request Header Fields Too Large'
+HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = HTTP_431
HTTP_500 = '500 Internal Server Error'
HTTP_INTERNAL_SERVER_ERROR = HTTP_500
@@ -103,6 +109,8 @@
HTTP_GATEWAY_TIMEOUT = HTTP_504
HTTP_505 = '505 HTTP Version not supported'
HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505
+HTTP_511 = '511 Network Authentication Required'
+HTTP_NETWORK_AUTHENTICATION_REQUIRED = HTTP_511
# 70X - Inexcusable
HTTP_701 = '701 Meh'
| {"golden_diff": "diff --git a/falcon/status_codes.py b/falcon/status_codes.py\n--- a/falcon/status_codes.py\n+++ b/falcon/status_codes.py\n@@ -90,6 +90,12 @@\n HTTP_IM_A_TEAPOT = HTTP_418\n HTTP_426 = '426 Upgrade Required'\n HTTP_UPGRADE_REQUIRED = HTTP_426\n+HTTP_428 = '428 Precondition Required'\n+HTTP_PRECONDITION_REQUIRED = HTTP_428\n+HTTP_429 = '429 Too Many Requests'\n+HTTP_TOO_MANY_REQUESTS = HTTP_429\n+HTTP_431 = '431 Request Header Fields Too Large'\n+HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = HTTP_431\n \n HTTP_500 = '500 Internal Server Error'\n HTTP_INTERNAL_SERVER_ERROR = HTTP_500\n@@ -103,6 +109,8 @@\n HTTP_GATEWAY_TIMEOUT = HTTP_504\n HTTP_505 = '505 HTTP Version not supported'\n HTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505\n+HTTP_511 = '511 Network Authentication Required'\n+HTTP_NETWORK_AUTHENTICATION_REQUIRED = HTTP_511\n \n # 70X - Inexcusable\n HTTP_701 = '701 Meh'\n", "issue": "Missing HTTP status codes\nThere's a few status codes from here http://tools.ietf.org/html/rfc6585 that could be great including.\n\n429 Too many requests for instance.\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nHTTP_100 = '100 Continue'\nHTTP_CONTINUE = HTTP_100\nHTTP_101 = '101 Switching Protocols'\nHTTP_SWITCHING_PROTOCOLS = HTTP_101\n\nHTTP_200 = '200 OK'\nHTTP_OK = HTTP_200\nHTTP_201 = '201 Created'\nHTTP_CREATED = HTTP_201\nHTTP_202 = '202 Accepted'\nHTTP_ACCEPTED = HTTP_202\nHTTP_203 = '203 Non-Authoritative Information'\nHTTP_NON_AUTHORITATIVE_INFORMATION = HTTP_203\nHTTP_204 = '204 No Content'\nHTTP_NO_CONTENT = HTTP_204\nHTTP_205 = '205 Reset Content'\nHTTP_RESET_CONTENT = HTTP_205\nHTTP_206 = '206 Partial Content'\nHTTP_PARTIAL_CONTENT = HTTP_206\nHTTP_226 = '226 IM Used'\nHTTP_IM_USED = HTTP_226\n\nHTTP_300 = '300 Multiple Choices'\nHTTP_MULTIPLE_CHOICES = HTTP_300\nHTTP_301 = '301 Moved Permanently'\nHTTP_MOVED_PERMANENTLY = HTTP_301\nHTTP_302 = '302 Found'\nHTTP_FOUND = HTTP_302\nHTTP_303 = '303 See Other'\nHTTP_SEE_OTHER = HTTP_303\nHTTP_304 = '304 Not Modified'\nHTTP_NOT_MODIFIED = HTTP_304\nHTTP_305 = '305 Use Proxy'\nHTTP_USE_PROXY = HTTP_305\nHTTP_307 = '307 Temporary Redirect'\nHTTP_TEMPORARY_REDIRECT = HTTP_307\n\nHTTP_400 = '400 Bad Request'\nHTTP_BAD_REQUEST = HTTP_400\nHTTP_401 = '401 Unauthorized' # <-- Really means \"unauthenticated\"\nHTTP_UNAUTHORIZED = HTTP_401\nHTTP_402 = '402 Payment Required'\nHTTP_PAYMENT_REQUIRED = HTTP_402\nHTTP_403 = '403 Forbidden' # <-- Really means \"unauthorized\"\nHTTP_FORBIDDEN = HTTP_403\nHTTP_404 = '404 Not Found'\nHTTP_NOT_FOUND = HTTP_404\nHTTP_405 = '405 Method Not Allowed'\nHTTP_METHOD_NOT_ALLOWED = HTTP_405\nHTTP_406 = '406 Not Acceptable'\nHTTP_NOT_ACCEPTABLE = HTTP_406\nHTTP_407 = '407 Proxy Authentication Required'\nHTTP_PROXY_AUTHENTICATION_REQUIRED = HTTP_407\nHTTP_408 = '408 Request Time-out'\nHTTP_REQUEST_TIMEOUT = HTTP_408\nHTTP_409 = '409 Conflict'\nHTTP_CONFLICT = HTTP_409\nHTTP_410 = '410 Gone'\nHTTP_GONE = HTTP_410\nHTTP_411 = '411 
Length Required'\nHTTP_LENGTH_REQUIRED = HTTP_411\nHTTP_412 = '412 Precondition Failed'\nHTTP_PRECONDITION_FAILED = HTTP_412\nHTTP_413 = '413 Payload Too Large'\nHTTP_REQUEST_ENTITY_TOO_LARGE = HTTP_413\nHTTP_414 = '414 URI Too Long'\nHTTP_REQUEST_URI_TOO_LONG = HTTP_414\nHTTP_415 = '415 Unsupported Media Type'\nHTTP_UNSUPPORTED_MEDIA_TYPE = HTTP_415\nHTTP_416 = '416 Range Not Satisfiable'\nHTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416\nHTTP_417 = '417 Expectation Failed'\nHTTP_EXPECTATION_FAILED = HTTP_417\nHTTP_418 = \"418 I'm a teapot\"\nHTTP_IM_A_TEAPOT = HTTP_418\nHTTP_426 = '426 Upgrade Required'\nHTTP_UPGRADE_REQUIRED = HTTP_426\n\nHTTP_500 = '500 Internal Server Error'\nHTTP_INTERNAL_SERVER_ERROR = HTTP_500\nHTTP_501 = '501 Not Implemented'\nHTTP_NOT_IMPLEMENTED = HTTP_501\nHTTP_502 = '502 Bad Gateway'\nHTTP_BAD_GATEWAY = HTTP_502\nHTTP_503 = '503 Service Unavailable'\nHTTP_SERVICE_UNAVAILABLE = HTTP_503\nHTTP_504 = '504 Gateway Time-out'\nHTTP_GATEWAY_TIMEOUT = HTTP_504\nHTTP_505 = '505 HTTP Version not supported'\nHTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505\n\n# 70X - Inexcusable\nHTTP_701 = '701 Meh'\nHTTP_702 = '702 Emacs'\nHTTP_703 = '703 Explosion'\n\n# 71X - Novelty Implementations\nHTTP_710 = '710 PHP'\nHTTP_711 = '711 Convenience Store'\nHTTP_712 = '712 NoSQL'\nHTTP_719 = '719 I am not a teapot'\n\n# 72X - Edge Cases\nHTTP_720 = '720 Unpossible'\nHTTP_721 = '721 Known Unknowns'\nHTTP_722 = '722 Unknown Unknowns'\nHTTP_723 = '723 Tricky'\nHTTP_724 = '724 This line should be unreachable'\nHTTP_725 = '725 It works on my machine'\nHTTP_726 = \"726 It's a feature, not a bug\"\nHTTP_727 = '727 32 bits is plenty'\n\n# 74X - Meme Driven\nHTTP_740 = '740 Computer says no'\nHTTP_741 = '741 Compiling'\nHTTP_742 = '742 A kitten dies'\nHTTP_743 = '743 I thought I knew regular expressions'\nHTTP_744 = '744 Y U NO write integration tests?'\nHTTP_745 = (\"745 I don't always test my code, but when I do\"\n \"I do it in production\")\nHTTP_748 = '748 Confounded by Ponies'\nHTTP_749 = '749 Reserved for Chuck Norris'\n\n# 75X - Syntax Errors\nHTTP_750 = \"750 Didn't bother to compile it\"\nHTTP_753 = '753 Syntax Error'\nHTTP_754 = '754 Too many semi-colons'\nHTTP_755 = '755 Not enough semi-colons'\nHTTP_759 = '759 Unexpected T_PAAMAYIM_NEKUDOTAYIM'\n\n# 77X - Predictable Problems\nHTTP_771 = '771 Cached for too long'\nHTTP_772 = '772 Not cached long enough'\nHTTP_773 = '773 Not cached at all'\nHTTP_774 = '774 Why was this cached?'\nHTTP_776 = '776 Error on the Exception'\nHTTP_777 = '777 Coincidence'\nHTTP_778 = '778 Off By One Error'\nHTTP_779 = '779 Off By Too Many To Count Error'\n\n# 78X - Somebody Else's Problem\nHTTP_780 = '780 Project owner not responding'\nHTTP_781 = '781 Operations'\nHTTP_782 = '782 QA'\nHTTP_783 = '783 It was a customer request, honestly'\nHTTP_784 = '784 Management, obviously'\nHTTP_785 = '785 TPS Cover Sheet not attached'\nHTTP_786 = '786 Try it now'\n\n# 79X - Internet crashed\nHTTP_791 = '791 The Internet shut down due to copyright restrictions'\nHTTP_792 = '792 Climate change driven catastrophic weather event'\nHTTP_797 = '797 This is the last page of the Internet. 
Go back'\nHTTP_799 = '799 End of the world'\n", "path": "falcon/status_codes.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nHTTP_100 = '100 Continue'\nHTTP_CONTINUE = HTTP_100\nHTTP_101 = '101 Switching Protocols'\nHTTP_SWITCHING_PROTOCOLS = HTTP_101\n\nHTTP_200 = '200 OK'\nHTTP_OK = HTTP_200\nHTTP_201 = '201 Created'\nHTTP_CREATED = HTTP_201\nHTTP_202 = '202 Accepted'\nHTTP_ACCEPTED = HTTP_202\nHTTP_203 = '203 Non-Authoritative Information'\nHTTP_NON_AUTHORITATIVE_INFORMATION = HTTP_203\nHTTP_204 = '204 No Content'\nHTTP_NO_CONTENT = HTTP_204\nHTTP_205 = '205 Reset Content'\nHTTP_RESET_CONTENT = HTTP_205\nHTTP_206 = '206 Partial Content'\nHTTP_PARTIAL_CONTENT = HTTP_206\nHTTP_226 = '226 IM Used'\nHTTP_IM_USED = HTTP_226\n\nHTTP_300 = '300 Multiple Choices'\nHTTP_MULTIPLE_CHOICES = HTTP_300\nHTTP_301 = '301 Moved Permanently'\nHTTP_MOVED_PERMANENTLY = HTTP_301\nHTTP_302 = '302 Found'\nHTTP_FOUND = HTTP_302\nHTTP_303 = '303 See Other'\nHTTP_SEE_OTHER = HTTP_303\nHTTP_304 = '304 Not Modified'\nHTTP_NOT_MODIFIED = HTTP_304\nHTTP_305 = '305 Use Proxy'\nHTTP_USE_PROXY = HTTP_305\nHTTP_307 = '307 Temporary Redirect'\nHTTP_TEMPORARY_REDIRECT = HTTP_307\n\nHTTP_400 = '400 Bad Request'\nHTTP_BAD_REQUEST = HTTP_400\nHTTP_401 = '401 Unauthorized' # <-- Really means \"unauthenticated\"\nHTTP_UNAUTHORIZED = HTTP_401\nHTTP_402 = '402 Payment Required'\nHTTP_PAYMENT_REQUIRED = HTTP_402\nHTTP_403 = '403 Forbidden' # <-- Really means \"unauthorized\"\nHTTP_FORBIDDEN = HTTP_403\nHTTP_404 = '404 Not Found'\nHTTP_NOT_FOUND = HTTP_404\nHTTP_405 = '405 Method Not Allowed'\nHTTP_METHOD_NOT_ALLOWED = HTTP_405\nHTTP_406 = '406 Not Acceptable'\nHTTP_NOT_ACCEPTABLE = HTTP_406\nHTTP_407 = '407 Proxy Authentication Required'\nHTTP_PROXY_AUTHENTICATION_REQUIRED = HTTP_407\nHTTP_408 = '408 Request Time-out'\nHTTP_REQUEST_TIMEOUT = HTTP_408\nHTTP_409 = '409 Conflict'\nHTTP_CONFLICT = HTTP_409\nHTTP_410 = '410 Gone'\nHTTP_GONE = HTTP_410\nHTTP_411 = '411 Length Required'\nHTTP_LENGTH_REQUIRED = HTTP_411\nHTTP_412 = '412 Precondition Failed'\nHTTP_PRECONDITION_FAILED = HTTP_412\nHTTP_413 = '413 Payload Too Large'\nHTTP_REQUEST_ENTITY_TOO_LARGE = HTTP_413\nHTTP_414 = '414 URI Too Long'\nHTTP_REQUEST_URI_TOO_LONG = HTTP_414\nHTTP_415 = '415 Unsupported Media Type'\nHTTP_UNSUPPORTED_MEDIA_TYPE = HTTP_415\nHTTP_416 = '416 Range Not Satisfiable'\nHTTP_REQUESTED_RANGE_NOT_SATISFIABLE = HTTP_416\nHTTP_417 = '417 Expectation Failed'\nHTTP_EXPECTATION_FAILED = HTTP_417\nHTTP_418 = \"418 I'm a teapot\"\nHTTP_IM_A_TEAPOT = HTTP_418\nHTTP_426 = '426 Upgrade Required'\nHTTP_UPGRADE_REQUIRED = HTTP_426\nHTTP_428 = '428 Precondition Required'\nHTTP_PRECONDITION_REQUIRED = HTTP_428\nHTTP_429 = '429 Too Many Requests'\nHTTP_TOO_MANY_REQUESTS = HTTP_429\nHTTP_431 = '431 Request Header Fields Too Large'\nHTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = HTTP_431\n\nHTTP_500 = '500 Internal Server Error'\nHTTP_INTERNAL_SERVER_ERROR = HTTP_500\nHTTP_501 = '501 Not 
Implemented'\nHTTP_NOT_IMPLEMENTED = HTTP_501\nHTTP_502 = '502 Bad Gateway'\nHTTP_BAD_GATEWAY = HTTP_502\nHTTP_503 = '503 Service Unavailable'\nHTTP_SERVICE_UNAVAILABLE = HTTP_503\nHTTP_504 = '504 Gateway Time-out'\nHTTP_GATEWAY_TIMEOUT = HTTP_504\nHTTP_505 = '505 HTTP Version not supported'\nHTTP_HTTP_VERSION_NOT_SUPPORTED = HTTP_505\nHTTP_511 = '511 Network Authentication Required'\nHTTP_NETWORK_AUTHENTICATION_REQUIRED = HTTP_511\n\n# 70X - Inexcusable\nHTTP_701 = '701 Meh'\nHTTP_702 = '702 Emacs'\nHTTP_703 = '703 Explosion'\n\n# 71X - Novelty Implementations\nHTTP_710 = '710 PHP'\nHTTP_711 = '711 Convenience Store'\nHTTP_712 = '712 NoSQL'\nHTTP_719 = '719 I am not a teapot'\n\n# 72X - Edge Cases\nHTTP_720 = '720 Unpossible'\nHTTP_721 = '721 Known Unknowns'\nHTTP_722 = '722 Unknown Unknowns'\nHTTP_723 = '723 Tricky'\nHTTP_724 = '724 This line should be unreachable'\nHTTP_725 = '725 It works on my machine'\nHTTP_726 = \"726 It's a feature, not a bug\"\nHTTP_727 = '727 32 bits is plenty'\n\n# 74X - Meme Driven\nHTTP_740 = '740 Computer says no'\nHTTP_741 = '741 Compiling'\nHTTP_742 = '742 A kitten dies'\nHTTP_743 = '743 I thought I knew regular expressions'\nHTTP_744 = '744 Y U NO write integration tests?'\nHTTP_745 = (\"745 I don't always test my code, but when I do\"\n \"I do it in production\")\nHTTP_748 = '748 Confounded by Ponies'\nHTTP_749 = '749 Reserved for Chuck Norris'\n\n# 75X - Syntax Errors\nHTTP_750 = \"750 Didn't bother to compile it\"\nHTTP_753 = '753 Syntax Error'\nHTTP_754 = '754 Too many semi-colons'\nHTTP_755 = '755 Not enough semi-colons'\nHTTP_759 = '759 Unexpected T_PAAMAYIM_NEKUDOTAYIM'\n\n# 77X - Predictable Problems\nHTTP_771 = '771 Cached for too long'\nHTTP_772 = '772 Not cached long enough'\nHTTP_773 = '773 Not cached at all'\nHTTP_774 = '774 Why was this cached?'\nHTTP_776 = '776 Error on the Exception'\nHTTP_777 = '777 Coincidence'\nHTTP_778 = '778 Off By One Error'\nHTTP_779 = '779 Off By Too Many To Count Error'\n\n# 78X - Somebody Else's Problem\nHTTP_780 = '780 Project owner not responding'\nHTTP_781 = '781 Operations'\nHTTP_782 = '782 QA'\nHTTP_783 = '783 It was a customer request, honestly'\nHTTP_784 = '784 Management, obviously'\nHTTP_785 = '785 TPS Cover Sheet not attached'\nHTTP_786 = '786 Try it now'\n\n# 79X - Internet crashed\nHTTP_791 = '791 The Internet shut down due to copyright restrictions'\nHTTP_792 = '792 Climate change driven catastrophic weather event'\nHTTP_797 = '797 This is the last page of the Internet. Go back'\nHTTP_799 = '799 End of the world'\n", "path": "falcon/status_codes.py"}]} | 2,729 | 295 |
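
For the falcon patch above, the new constants slot into the usual responder pattern, where a resource assigns one of the `falcon.status_codes` strings to `resp.status`. The sketch below is hypothetical usage, not part of the patch: the `ThrottledResource` class, its per-process counter, and the limit of 5 are invented for illustration, and it assumes the new constants are re-exported at package level the same way the existing `HTTP_200`-style constants are.

```python
import falcon


class ThrottledResource(object):
    """Toy rate limiter: answers 429 Too Many Requests past an arbitrary per-process count."""

    def __init__(self, limit=5):
        self._limit = limit
        self._seen = 0

    def on_get(self, req, resp):
        self._seen += 1
        if self._seen > self._limit:
            resp.status = falcon.HTTP_429            # constant added by the patch
            resp.set_header('Retry-After', '60')     # back-off hint suggested by RFC 6585
            return

        resp.status = falcon.HTTP_200
        resp.body = 'ok'


api = falcon.API()
api.add_route('/throttled', ThrottledResource())
```
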
gh_patches_debug_17285 | rasdani/github-patches | git_diff | weecology__retriever-427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add list of all available datasets to the documentation
It would be nice to have a page of the documentation that is an auto-generated list of all of the currently available datasets. Each item would have the text from the `name` attribute and be linked to the `url` or `ref` attribute if available.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 import sys, os
2 from retriever import VERSION,COPYRIGHT
3 needs_sphinx = '1.3'
4
5 # Add any Sphinx extension module names here, as strings.
6 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
7
8 # Add any paths that contain templates here, relative to this directory.
9 templates_path = ['_templates']
10
11 # The suffix of source filenames.
12 source_suffix = '.rst'
13
14 # The encoding of source files.
15 # source_encoding = 'utf-8-sig'
16
17 # The master toctree document.
18 master_doc = 'index'
19
20 # General information about the project.
21 project = u'EcoData Retriever'
22 copyright = COPYRIGHT
23
24 version = release = VERSION
25
26 # List of patterns, relative to source directory, that match files and
27 # directories to ignore when looking for source files.
28 exclude_patterns = []
29
30 # The reST default role (used for this markup: `text`) to use for all documents.
31 #default_role = None
32
33 # If true, '()' will be appended to :func: etc. cross-reference text.
34 #add_function_parentheses = True
35
36 # If true, the current module name will be prepended to all description
37 # unit titles (such as .. function::).
38 #add_module_names = True
39
40 # If true, sectionauthor and moduleauthor directives will be shown in the
41 # output. They are ignored by default.
42 #show_authors = False
43
44 # The name of the Pygments (syntax highlighting) style to use.
45 pygments_style = 'sphinx'
46
47 # A list of ignored prefixes for module index sorting.
48 #modindex_common_prefix = []
49
50
51 # -- Options for HTML output ---------------------------------------------------
52
53 # The theme to use for HTML and HTML Help pages. See the documentation for
54 # a list of builtin themes.
55 html_theme = 'classic'
56
57 # Theme options are theme-specific and customize the look and feel of a theme
58 # further. For a list of options available for each theme, see the
59 # documentation.
60 #html_theme_options = {}
61
62 # Add any paths that contain custom themes here, relative to this directory.
63 #html_theme_path = []
64
65 # The name for this set of Sphinx documents. If None, it defaults to
66 # "<project> v<release> documentation".
67 #html_title = None
68
69 # A shorter title for the navigation bar. Default is the same as html_title.
70 #html_short_title = None
71
72 # The name of an image file (relative to this directory) to place at the top
73 # of the sidebar.
74 #html_logo = None
75
76 # The name of an image file (within the static path) to use as favicon of the
77 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
78 # pixels large.
79 #html_favicon = None
80
81 # Add any paths that contain custom static files (such as style sheets) here,
82 # relative to this directory. They are copied after the builtin static files,
83 # so a file named "default.css" will overwrite the builtin "default.css".
84
85
86 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
87 # using the given strftime format.
88 #html_last_updated_fmt = '%b %d, %Y'
89
90 # If true, SmartyPants will be used to convert quotes and dashes to
91 # typographically correct entities.
92 #html_use_smartypants = True
93
94 # Custom sidebar templates, maps document names to template names.
95 #html_sidebars = {}
96
97 # Additional templates that should be rendered to pages, maps page names to
98 # template names.
99 #html_additional_pages = {}
100
101 # If false, no module index is generated.
102 #html_domain_indices = True
103
104 # If false, no index is generated.
105 #html_use_index = True
106
107 # If true, the index is split into individual pages for each letter.
108 #html_split_index = False
109
110 # If true, links to the reST sources are added to the pages.
111 #html_show_sourcelink = True
112
113 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
114 #html_show_sphinx = True
115
116 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
117 #html_show_copyright = True
118
119 # If true, an OpenSearch description file will be output, and all pages will
120 # contain a <link> tag referring to it. The value of this option must be the
121 # base URL from which the finished HTML is served.
122 #html_use_opensearch = ''
123
124 # This is the file name suffix for HTML files (e.g. ".xhtml").
125 #html_file_suffix = None
126
127 # Output file base name for HTML help builder.
128
129
130
131 # -- Options for LaTeX output --------------------------------------------------
132
133 latex_elements = {
134 # The paper size ('letterpaper' or 'a4paper').
135 #'papersize': 'letterpaper',
136
137 # The font size ('10pt', '11pt' or '12pt').
138 #'pointsize': '10pt',
139
140 # Additional stuff for the LaTeX preamble.
141 #'preamble': '',
142 }
143
144 # Grouping the document tree into LaTeX files. List of tuples
145 # (source start file, target name, title, author, documentclass [howto/manual]).
146
147 # The name of an image file (relative to this directory) to place at the top of
148 # the title page.
149 #latex_logo = None
150
151 # For "manual" documents, if this is true, then toplevel headings are parts,
152 # not chapters.
153 #latex_use_parts = False
154
155 # If true, show page references after internal links.
156 #latex_show_pagerefs = False
157
158 # If true, show URL addresses after external links.
159 #latex_show_urls = False
160
161 # Documents to append as an appendix to all manuals.
162 #latex_appendices = []
163
164 # If false, no module index is generated.
165 #latex_domain_indices = True
166
167
168 # -- Options for manual page output --------------------------------------------
169
170 # One entry per manual page. List of tuples
171
172 # If true, show URL addresses after external links.
173 #man_show_urls = False
174
175
176 # -- Options for Texinfo output ------------------------------------------------
177
178 # Grouping the document tree into Texinfo files. List of tuples
179 # (source start file, target name, title, author,
180 # dir menu entry, description, category)
181
182
183 # Documents to append as an appendix to all manuals.
184 #texinfo_appendices = []
185
186 # If false, no module index is generated.
187 #texinfo_domain_indices = True
188
189 # How to display URL addresses: 'footnote', 'no', or 'inline'.
190 #texinfo_show_urls = 'footnote'
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,33 @@
-import sys, os
from retriever import VERSION,COPYRIGHT
+from retriever.lib.repository import check_for_updates
+from retriever import SCRIPT_LIST
+
+# Create the .rst file for the available datasets
+datasetfile = open("datasets.rst", "w")
+datasetfile_title = """
+==================
+Datasets Available
+==================
+
+
+"""
+check_for_updates(graphical=False)
+script_list = SCRIPT_LIST()
+
+# write the title of dataset rst file
+datasetfile.write(datasetfile_title)
+
+# get info from the scripts
+for script_num, script in enumerate(script_list, start=1):
+ if script.ref.strip():
+ reference_link = script.ref
+ elif bool(script.urls.values()):
+ reference_link = script.urls.values()[0].rpartition('/')[0]
+ else:
+ reference_link = ""
+ datasetfile.write("| " + str(script_num) + ". **{}** \n| shortname: {}\n| reference: {}\n\n".format(script.name, script.shortname, reference_link))
+datasetfile.close()
+
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings.
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,5 +1,33 @@\n-import sys, os\n from retriever import VERSION,COPYRIGHT\n+from retriever.lib.repository import check_for_updates\n+from retriever import SCRIPT_LIST\n+\n+# Create the .rst file for the available datasets\n+datasetfile = open(\"datasets.rst\", \"w\")\n+datasetfile_title = \"\"\"\n+==================\n+Datasets Available\n+==================\n+\n+\n+\"\"\"\n+check_for_updates(graphical=False)\n+script_list = SCRIPT_LIST()\n+\n+# write the title of dataset rst file\n+datasetfile.write(datasetfile_title)\n+\n+# get info from the scripts\n+for script_num, script in enumerate(script_list, start=1):\n+ if script.ref.strip():\n+ reference_link = script.ref\n+ elif bool(script.urls.values()):\n+ reference_link = script.urls.values()[0].rpartition('/')[0]\n+ else:\n+ reference_link = \"\"\n+ datasetfile.write(\"| \" + str(script_num) + \". **{}** \\n| shortname: {}\\n| reference: {}\\n\\n\".format(script.name, script.shortname, reference_link))\n+datasetfile.close()\n+\n needs_sphinx = '1.3'\n \n # Add any Sphinx extension module names here, as strings.\n", "issue": "Add list of all available datasets to the documentation\nIt would be nice to have a page of the documentation that is an auto-generated list of all of the currently available datasets. Each item would have the text from the `name` attribute and be linked to the `url` or `ref` attribute if available.\n\n", "before_files": [{"content": "import sys, os\nfrom retriever import VERSION,COPYRIGHT\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'EcoData Retriever'\ncopyright = COPYRIGHT\n\nversion = release = VERSION\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'classic'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n \n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\n \n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\n \n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n \n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\n \n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "docs/conf.py"}], "after_files": [{"content": "from retriever import VERSION,COPYRIGHT\nfrom retriever.lib.repository import check_for_updates\nfrom retriever import SCRIPT_LIST\n\n# Create the .rst file for the available datasets\ndatasetfile = open(\"datasets.rst\", \"w\")\ndatasetfile_title = \"\"\"\n==================\nDatasets Available\n==================\n\n\n\"\"\"\ncheck_for_updates(graphical=False)\nscript_list = SCRIPT_LIST()\n\n# write the title of dataset rst file\ndatasetfile.write(datasetfile_title)\n\n# get info from the scripts\nfor script_num, script in enumerate(script_list, start=1):\n if script.ref.strip():\n reference_link = script.ref\n elif bool(script.urls.values()):\n reference_link = script.urls.values()[0].rpartition('/')[0]\n else:\n reference_link = \"\"\n datasetfile.write(\"| \" + str(script_num) + \". **{}** \\n| shortname: {}\\n| reference: {}\\n\\n\".format(script.name, script.shortname, reference_link))\ndatasetfile.close()\n\nneeds_sphinx = '1.3'\n\n# Add any Sphinx extension module names here, as strings.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'EcoData Retriever'\ncopyright = COPYRIGHT\n\nversion = release = VERSION\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'classic'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n \n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\n \n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\n \n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n \n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\n \n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n", "path": "docs/conf.py"}]} | 2,221 | 290 |
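The patch above builds `datasets.rst` at docs build time by looping over the retriever's script list. A minimal standalone sketch of that generation loop, using a hypothetical in-memory `scripts` list in place of the real `SCRIPT_LIST()` / `check_for_updates()` calls:

```python
# Sketch only: emulate the datasets.rst generation from the patch above,
# with placeholder script metadata instead of retriever's SCRIPT_LIST().
from collections import namedtuple

Script = namedtuple("Script", ["name", "shortname", "ref", "urls"])

scripts = [
    Script("Example dataset A", "example-a", "http://example.org/paper", {}),
    Script("Example dataset B", "example-b", "", {"data": "http://example.org/b/data.csv"}),
]

with open("datasets.rst", "w") as datasetfile:
    datasetfile.write("==================\nDatasets Available\n==================\n\n\n")
    for script_num, script in enumerate(scripts, start=1):
        if script.ref.strip():
            reference_link = script.ref
        elif script.urls:
            # list() keeps the indexing safe on Python 3 dict views as well
            reference_link = list(script.urls.values())[0].rpartition('/')[0]
        else:
            reference_link = ""
        datasetfile.write("| {}. **{}** \n| shortname: {}\n| reference: {}\n\n".format(
            script_num, script.name, script.shortname, reference_link))
```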
gh_patches_debug_30250 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alarm skill doesn't speak the date naturally
What I mean by this is if setting an alarm for 5 minutes, it reads the entire date (ie. "set an alarm for Thursday, June 16th, at 04:05 pm") rather than simply saying the same thing in simpler terms (ie. "set an alarm for 5 minutes"). And if the date is a day ahead I think it would be best that it says "Tomorrow at 4:05pm".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/scheduled_skills.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import abc
20 from datetime import datetime
21 from threading import Timer, Lock
22 from time import mktime
23
24 import parsedatetime as pdt
25
26 from adapt.intent import IntentBuilder
27 from mycroft.skills import time_rules
28 from mycroft.skills.core import MycroftSkill
29
30 __author__ = 'jdorleans'
31
32
33 class ScheduledSkill(MycroftSkill):
34 """
35 Abstract class which provides a repeatable notification behaviour at a
36 specified time.
37
38 Skills implementation inherits this class when it needs to schedule a task
39 or a notification.
40 """
41
42 DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())
43
44 def __init__(self, name, emitter=None):
45 super(ScheduledSkill, self).__init__(name, emitter)
46 self.timer = None
47 self.calendar = pdt.Calendar()
48 self.time_rules = time_rules.create(self.lang)
49
50 def schedule(self):
51 times = sorted(self.get_times())
52
53 if len(times) > 0:
54 self.cancel()
55 t = times[0]
56 now = self.get_utc_time()
57 delay = max(float(t) - now, 1)
58 self.timer = Timer(delay, self.notify, [t])
59 self.start()
60
61 def start(self):
62 if self.timer:
63 self.timer.start()
64
65 def cancel(self):
66 if self.timer:
67 self.timer.cancel()
68
69 def convert_local(self, utc_time):
70 return utc_time + self.DELTA_TIME
71
72 def get_utc_time(self, sentence=''):
73 return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME
74
75 def get_formatted_time(self, timestamp):
76 return datetime.fromtimestamp(timestamp).strftime(
77 self.config_core.get('time.format'))
78
79 @abc.abstractmethod
80 def get_times(self):
81 pass
82
83 @abc.abstractmethod
84 def notify(self, timestamp):
85 pass
86
87
88 class ScheduledCRUDSkill(ScheduledSkill):
89 """
90 Abstract CRUD class which provides a repeatable notification behaviour at
91 a specified time.
92
93 It registers CRUD intents and exposes its functions to manipulate a
94 provided ``data``
95
96 Skills implementation inherits this class when it needs to schedule a task
97 or a notification with a provided data
98 that can be manipulated by CRUD commands.
99
100 E.g. CRUD operations for a Reminder Skill
101 #. "Mycroft, list two reminders"
102 #. "Mycroft, list all reminders"
103 #. "Mycroft, delete one reminder"
104 #. "Mycroft, remind me to contribute to Mycroft project"
105 """
106
107 LOCK = Lock()
108 REPEAT_TASK = 'repeat'
109 PENDING_TASK = 'pending'
110 ONE_DAY_SECS = 86400
111
112 def __init__(self, name, emitter=None, basedir=None):
113 super(ScheduledCRUDSkill, self).__init__(name, emitter)
114 self.data = {}
115 self.repeat_data = {}
116 self.basedir = basedir
117
118 def initialize(self):
119 self.load_data()
120 self.load_repeat_data()
121 self.load_data_files(self.basedir)
122 self.register_regex("(?P<" + self.name + "Amount>\d+)")
123 self.register_intent(
124 self.build_intent_create().build(), self.handle_create)
125 self.register_intent(
126 self.build_intent_list().build(), self.handle_list)
127 self.register_intent(
128 self.build_intent_delete().build(), self.handle_delete)
129 self.schedule()
130
131 @abc.abstractmethod
132 def load_data(self):
133 pass
134
135 @abc.abstractmethod
136 def load_repeat_data(self):
137 pass
138
139 def build_intent_create(self):
140 return IntentBuilder(
141 self.name + 'CreateIntent').require(self.name + 'CreateVerb')
142
143 def build_intent_list(self):
144 return IntentBuilder(
145 self.name + 'ListIntent').require(self.name + 'ListVerb') \
146 .optionally(self.name + 'Amount').require(self.name + 'Keyword')
147
148 def build_intent_delete(self):
149 return IntentBuilder(
150 self.name + 'DeleteIntent').require(self.name + 'DeleteVerb') \
151 .optionally(self.name + 'Amount').require(self.name + 'Keyword')
152
153 def get_times(self):
154 return self.data.keys()
155
156 def handle_create(self, message):
157 utterance = message.metadata.get('utterance')
158 date = self.get_utc_time(utterance)
159 delay = date - self.get_utc_time()
160
161 if delay > 0:
162 self.feedback_create(date)
163 self.add_sync(date, message)
164 self.save_sync()
165 else:
166 self.speak_dialog('schedule.datetime.error')
167
168 def feedback_create(self, utc_time):
169 self.speak_dialog(
170 'schedule.create', data=self.build_feedback_payload(utc_time))
171
172 def add_sync(self, utc_time, message):
173 with self.LOCK:
174 self.add(utc_time, message)
175
176 def add(self, utc_time, message):
177 utterance = message.metadata.get('utterance')
178 self.data[utc_time] = None
179 self.repeat_data[utc_time] = self.time_rules.get_week_days(utterance)
180
181 def remove_sync(self, utc_time, add_next=True):
182 with self.LOCK:
183 val = self.remove(utc_time, add_next)
184 return val
185
186 def remove(self, utc_time, add_next=True):
187 value = self.data.pop(utc_time)
188 self.add_next_time(utc_time, value, add_next)
189 return value
190
191 def add_next_time(self, utc_time, value, add_next=True):
192 days = self.repeat_data.pop(utc_time)
193 if add_next and days:
194 now_time = self.get_utc_time()
195 next_time = utc_time + self.ONE_DAY_SECS
196 now_day = datetime.fromtimestamp(utc_time).weekday()
197 next_day = datetime.fromtimestamp(next_time).weekday()
198 while next_day != now_day:
199 if days[next_day] and next_time >= now_time:
200 self.data[next_time] = value
201 self.repeat_data[next_time] = days
202 break
203 next_time += self.ONE_DAY_SECS
204 next_day = datetime.fromtimestamp(next_time).weekday()
205
206 def save_sync(self):
207 with self.LOCK:
208 self.save()
209
210 @abc.abstractmethod
211 def save(self):
212 pass
213
214 def handle_list(self, message):
215 count = self.get_amount(message)
216 if count > 0:
217 for key in sorted(self.data.keys()):
218 if count > 0:
219 self.feedback_list(key)
220 count -= 1
221 else:
222 break
223 else:
224 self.speak_dialog('schedule.list.empty')
225
226 def feedback_list(self, utc_time):
227 self.speak_dialog(
228 'schedule.list', data=self.build_feedback_payload(utc_time))
229
230 def build_feedback_payload(self, utc_time):
231 timestamp = self.convert_local(float(utc_time))
232 payload = {
233 'data': self.data.get(utc_time),
234 'datetime': self.get_formatted_time(timestamp)
235 }
236 return payload
237
238 def handle_delete(self, message):
239 count = self.get_amount(message)
240 if count > 0:
241 amount = count
242 for key in sorted(self.data.keys()):
243 if count > 0:
244 self.remove_sync(key, False)
245 count -= 1
246 else:
247 break
248 self.feedback_delete(amount)
249 self.save_sync()
250 else:
251 self.speak_dialog('schedule.delete.empty')
252
253 def feedback_delete(self, amount):
254 if amount > 1:
255 self.speak_dialog('schedule.delete.many', data={'amount': amount})
256 else:
257 self.speak_dialog(
258 'schedule.delete.single', data={'amount': amount})
259
260 # TODO - Localization
261 def get_amount(self, message, default=None):
262 size = len(self.data)
263 amount = message.metadata.get(self.name + 'Amount', default)
264 if amount in ['all', 'my', 'all my', None]:
265 total = size
266 elif amount in ['one', 'the next', 'the following']:
267 total = 1
268 elif amount == 'two':
269 total = 2
270 else:
271 total = int(amount)
272 return min(total, size)
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/skills/scheduled_skills.py b/mycroft/skills/scheduled_skills.py
--- a/mycroft/skills/scheduled_skills.py
+++ b/mycroft/skills/scheduled_skills.py
@@ -26,9 +26,12 @@
from adapt.intent import IntentBuilder
from mycroft.skills import time_rules
from mycroft.skills.core import MycroftSkill
+from mycroft.util.log import getLogger
__author__ = 'jdorleans'
+logger = getLogger(__name__)
+
class ScheduledSkill(MycroftSkill):
"""
@@ -40,6 +43,9 @@
"""
DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())
+ SECONDS_PER_DAY = 86400
+ SECONDS_PER_HOUR = 3600
+ SECONDS_PER_MINUTE = 60
def __init__(self, name, emitter=None):
super(ScheduledSkill, self).__init__(name, emitter)
@@ -73,7 +79,19 @@
return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME
def get_formatted_time(self, timestamp):
- return datetime.fromtimestamp(timestamp).strftime(
+ date = datetime.fromtimestamp(timestamp)
+ now = datetime.now()
+ diff = (date - now).total_seconds()
+ if diff <= self.SECONDS_PER_DAY:
+ hours, remainder = divmod(diff, self.SECONDS_PER_HOUR)
+ minutes, seconds = divmod(remainder, self.SECONDS_PER_MINUTE)
+ if hours:
+ return "%s hours and %s minutes from now" % \
+ (int(hours), int(minutes))
+ else:
+ return "%s minutes and %s seconds from now" % \
+ (int(minutes), int(seconds))
+ return date.strftime(
self.config_core.get('time.format'))
@abc.abstractmethod
| {"golden_diff": "diff --git a/mycroft/skills/scheduled_skills.py b/mycroft/skills/scheduled_skills.py\n--- a/mycroft/skills/scheduled_skills.py\n+++ b/mycroft/skills/scheduled_skills.py\n@@ -26,9 +26,12 @@\n from adapt.intent import IntentBuilder\n from mycroft.skills import time_rules\n from mycroft.skills.core import MycroftSkill\n+from mycroft.util.log import getLogger\n \n __author__ = 'jdorleans'\n \n+logger = getLogger(__name__)\n+\n \n class ScheduledSkill(MycroftSkill):\n \"\"\"\n@@ -40,6 +43,9 @@\n \"\"\"\n \n DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())\n+ SECONDS_PER_DAY = 86400\n+ SECONDS_PER_HOUR = 3600\n+ SECONDS_PER_MINUTE = 60\n \n def __init__(self, name, emitter=None):\n super(ScheduledSkill, self).__init__(name, emitter)\n@@ -73,7 +79,19 @@\n return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME\n \n def get_formatted_time(self, timestamp):\n- return datetime.fromtimestamp(timestamp).strftime(\n+ date = datetime.fromtimestamp(timestamp)\n+ now = datetime.now()\n+ diff = (date - now).total_seconds()\n+ if diff <= self.SECONDS_PER_DAY:\n+ hours, remainder = divmod(diff, self.SECONDS_PER_HOUR)\n+ minutes, seconds = divmod(remainder, self.SECONDS_PER_MINUTE)\n+ if hours:\n+ return \"%s hours and %s minutes from now\" % \\\n+ (int(hours), int(minutes))\n+ else:\n+ return \"%s minutes and %s seconds from now\" % \\\n+ (int(minutes), int(seconds))\n+ return date.strftime(\n self.config_core.get('time.format'))\n \n @abc.abstractmethod\n", "issue": "Alarm skill doesn't speak the date naturally\nWhat I mean by this is if setting an alarm for 5 minutes, it reads the entire date (ie. \"set an alarm for Thrusday, June 16th, at 04:05 pm\") rather than simply saying the same thing in simpler terms (ie. \"set an alarm for 5 minues\"). And if the date is a day ahead I think it would be best that it says \"Tomorrow at 4:05pm\".\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport abc\nfrom datetime import datetime\nfrom threading import Timer, Lock\nfrom time import mktime\n\nimport parsedatetime as pdt\n\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills import time_rules\nfrom mycroft.skills.core import MycroftSkill\n\n__author__ = 'jdorleans'\n\n\nclass ScheduledSkill(MycroftSkill):\n \"\"\"\n Abstract class which provides a repeatable notification behaviour at a\n specified time.\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification.\n \"\"\"\n\n DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())\n\n def __init__(self, name, emitter=None):\n super(ScheduledSkill, self).__init__(name, emitter)\n self.timer = None\n self.calendar = pdt.Calendar()\n self.time_rules = time_rules.create(self.lang)\n\n def schedule(self):\n times = sorted(self.get_times())\n\n if len(times) > 0:\n self.cancel()\n t = times[0]\n now = self.get_utc_time()\n delay = max(float(t) - now, 1)\n self.timer = Timer(delay, self.notify, [t])\n self.start()\n\n def start(self):\n if self.timer:\n self.timer.start()\n\n def cancel(self):\n if self.timer:\n self.timer.cancel()\n\n def convert_local(self, utc_time):\n return utc_time + self.DELTA_TIME\n\n def get_utc_time(self, sentence=''):\n return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME\n\n def get_formatted_time(self, timestamp):\n return datetime.fromtimestamp(timestamp).strftime(\n self.config_core.get('time.format'))\n\n @abc.abstractmethod\n def get_times(self):\n pass\n\n @abc.abstractmethod\n def notify(self, timestamp):\n pass\n\n\nclass ScheduledCRUDSkill(ScheduledSkill):\n \"\"\"\n Abstract CRUD class which provides a repeatable notification behaviour at\n a specified time.\n\n It registers CRUD intents and exposes its functions to manipulate a\n provided ``data``\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification with a provided data\n that can be manipulated by CRUD commands.\n\n E.g. CRUD operations for a Reminder Skill\n #. \"Mycroft, list two reminders\"\n #. \"Mycroft, list all reminders\"\n #. \"Mycroft, delete one reminder\"\n #. 
\"Mycroft, remind me to contribute to Mycroft project\"\n \"\"\"\n\n LOCK = Lock()\n REPEAT_TASK = 'repeat'\n PENDING_TASK = 'pending'\n ONE_DAY_SECS = 86400\n\n def __init__(self, name, emitter=None, basedir=None):\n super(ScheduledCRUDSkill, self).__init__(name, emitter)\n self.data = {}\n self.repeat_data = {}\n self.basedir = basedir\n\n def initialize(self):\n self.load_data()\n self.load_repeat_data()\n self.load_data_files(self.basedir)\n self.register_regex(\"(?P<\" + self.name + \"Amount>\\d+)\")\n self.register_intent(\n self.build_intent_create().build(), self.handle_create)\n self.register_intent(\n self.build_intent_list().build(), self.handle_list)\n self.register_intent(\n self.build_intent_delete().build(), self.handle_delete)\n self.schedule()\n\n @abc.abstractmethod\n def load_data(self):\n pass\n\n @abc.abstractmethod\n def load_repeat_data(self):\n pass\n\n def build_intent_create(self):\n return IntentBuilder(\n self.name + 'CreateIntent').require(self.name + 'CreateVerb')\n\n def build_intent_list(self):\n return IntentBuilder(\n self.name + 'ListIntent').require(self.name + 'ListVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def build_intent_delete(self):\n return IntentBuilder(\n self.name + 'DeleteIntent').require(self.name + 'DeleteVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def get_times(self):\n return self.data.keys()\n\n def handle_create(self, message):\n utterance = message.metadata.get('utterance')\n date = self.get_utc_time(utterance)\n delay = date - self.get_utc_time()\n\n if delay > 0:\n self.feedback_create(date)\n self.add_sync(date, message)\n self.save_sync()\n else:\n self.speak_dialog('schedule.datetime.error')\n\n def feedback_create(self, utc_time):\n self.speak_dialog(\n 'schedule.create', data=self.build_feedback_payload(utc_time))\n\n def add_sync(self, utc_time, message):\n with self.LOCK:\n self.add(utc_time, message)\n\n def add(self, utc_time, message):\n utterance = message.metadata.get('utterance')\n self.data[utc_time] = None\n self.repeat_data[utc_time] = self.time_rules.get_week_days(utterance)\n\n def remove_sync(self, utc_time, add_next=True):\n with self.LOCK:\n val = self.remove(utc_time, add_next)\n return val\n\n def remove(self, utc_time, add_next=True):\n value = self.data.pop(utc_time)\n self.add_next_time(utc_time, value, add_next)\n return value\n\n def add_next_time(self, utc_time, value, add_next=True):\n days = self.repeat_data.pop(utc_time)\n if add_next and days:\n now_time = self.get_utc_time()\n next_time = utc_time + self.ONE_DAY_SECS\n now_day = datetime.fromtimestamp(utc_time).weekday()\n next_day = datetime.fromtimestamp(next_time).weekday()\n while next_day != now_day:\n if days[next_day] and next_time >= now_time:\n self.data[next_time] = value\n self.repeat_data[next_time] = days\n break\n next_time += self.ONE_DAY_SECS\n next_day = datetime.fromtimestamp(next_time).weekday()\n\n def save_sync(self):\n with self.LOCK:\n self.save()\n\n @abc.abstractmethod\n def save(self):\n pass\n\n def handle_list(self, message):\n count = self.get_amount(message)\n if count > 0:\n for key in sorted(self.data.keys()):\n if count > 0:\n self.feedback_list(key)\n count -= 1\n else:\n break\n else:\n self.speak_dialog('schedule.list.empty')\n\n def feedback_list(self, utc_time):\n self.speak_dialog(\n 'schedule.list', data=self.build_feedback_payload(utc_time))\n\n def build_feedback_payload(self, utc_time):\n timestamp = 
self.convert_local(float(utc_time))\n payload = {\n 'data': self.data.get(utc_time),\n 'datetime': self.get_formatted_time(timestamp)\n }\n return payload\n\n def handle_delete(self, message):\n count = self.get_amount(message)\n if count > 0:\n amount = count\n for key in sorted(self.data.keys()):\n if count > 0:\n self.remove_sync(key, False)\n count -= 1\n else:\n break\n self.feedback_delete(amount)\n self.save_sync()\n else:\n self.speak_dialog('schedule.delete.empty')\n\n def feedback_delete(self, amount):\n if amount > 1:\n self.speak_dialog('schedule.delete.many', data={'amount': amount})\n else:\n self.speak_dialog(\n 'schedule.delete.single', data={'amount': amount})\n\n # TODO - Localization\n def get_amount(self, message, default=None):\n size = len(self.data)\n amount = message.metadata.get(self.name + 'Amount', default)\n if amount in ['all', 'my', 'all my', None]:\n total = size\n elif amount in ['one', 'the next', 'the following']:\n total = 1\n elif amount == 'two':\n total = 2\n else:\n total = int(amount)\n return min(total, size)\n", "path": "mycroft/skills/scheduled_skills.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport abc\nfrom datetime import datetime\nfrom threading import Timer, Lock\nfrom time import mktime\n\nimport parsedatetime as pdt\n\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills import time_rules\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'jdorleans'\n\nlogger = getLogger(__name__)\n\n\nclass ScheduledSkill(MycroftSkill):\n \"\"\"\n Abstract class which provides a repeatable notification behaviour at a\n specified time.\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification.\n \"\"\"\n\n DELTA_TIME = int((datetime.now() - datetime.utcnow()).total_seconds())\n SECONDS_PER_DAY = 86400\n SECONDS_PER_HOUR = 3600\n SECONDS_PER_MINUTE = 60\n\n def __init__(self, name, emitter=None):\n super(ScheduledSkill, self).__init__(name, emitter)\n self.timer = None\n self.calendar = pdt.Calendar()\n self.time_rules = time_rules.create(self.lang)\n\n def schedule(self):\n times = sorted(self.get_times())\n\n if len(times) > 0:\n self.cancel()\n t = times[0]\n now = self.get_utc_time()\n delay = max(float(t) - now, 1)\n self.timer = Timer(delay, self.notify, [t])\n self.start()\n\n def start(self):\n if self.timer:\n self.timer.start()\n\n def cancel(self):\n if self.timer:\n self.timer.cancel()\n\n def convert_local(self, utc_time):\n return utc_time + self.DELTA_TIME\n\n def get_utc_time(self, sentence=''):\n return mktime(self.calendar.parse(sentence)[0]) - self.DELTA_TIME\n\n def get_formatted_time(self, timestamp):\n date = datetime.fromtimestamp(timestamp)\n now = datetime.now()\n diff = (date - now).total_seconds()\n if diff <= self.SECONDS_PER_DAY:\n hours, remainder = divmod(diff, self.SECONDS_PER_HOUR)\n minutes, seconds = divmod(remainder, self.SECONDS_PER_MINUTE)\n if hours:\n return \"%s hours and %s minutes from now\" % \\\n (int(hours), int(minutes))\n else:\n return \"%s minutes and %s seconds from now\" % \\\n (int(minutes), int(seconds))\n return date.strftime(\n self.config_core.get('time.format'))\n\n @abc.abstractmethod\n def get_times(self):\n pass\n\n @abc.abstractmethod\n def notify(self, timestamp):\n pass\n\n\nclass ScheduledCRUDSkill(ScheduledSkill):\n \"\"\"\n Abstract CRUD class which provides a repeatable notification behaviour at\n a specified time.\n\n It registers CRUD intents and exposes its functions to manipulate a\n provided ``data``\n\n Skills implementation inherits this class when it needs to schedule a task\n or a notification with a provided data\n that can be manipulated by CRUD commands.\n\n E.g. CRUD operations for a Reminder Skill\n #. \"Mycroft, list two reminders\"\n #. \"Mycroft, list all reminders\"\n #. \"Mycroft, delete one reminder\"\n #. 
\"Mycroft, remind me to contribute to Mycroft project\"\n \"\"\"\n\n LOCK = Lock()\n REPEAT_TASK = 'repeat'\n PENDING_TASK = 'pending'\n ONE_DAY_SECS = 86400\n\n def __init__(self, name, emitter=None, basedir=None):\n super(ScheduledCRUDSkill, self).__init__(name, emitter)\n self.data = {}\n self.repeat_data = {}\n self.basedir = basedir\n\n def initialize(self):\n self.load_data()\n self.load_repeat_data()\n self.load_data_files(self.basedir)\n self.register_regex(\"(?P<\" + self.name + \"Amount>\\d+)\")\n self.register_intent(\n self.build_intent_create().build(), self.handle_create)\n self.register_intent(\n self.build_intent_list().build(), self.handle_list)\n self.register_intent(\n self.build_intent_delete().build(), self.handle_delete)\n self.schedule()\n\n @abc.abstractmethod\n def load_data(self):\n pass\n\n @abc.abstractmethod\n def load_repeat_data(self):\n pass\n\n def build_intent_create(self):\n return IntentBuilder(\n self.name + 'CreateIntent').require(self.name + 'CreateVerb')\n\n def build_intent_list(self):\n return IntentBuilder(\n self.name + 'ListIntent').require(self.name + 'ListVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def build_intent_delete(self):\n return IntentBuilder(\n self.name + 'DeleteIntent').require(self.name + 'DeleteVerb') \\\n .optionally(self.name + 'Amount').require(self.name + 'Keyword')\n\n def get_times(self):\n return self.data.keys()\n\n def handle_create(self, message):\n utterance = message.metadata.get('utterance')\n date = self.get_utc_time(utterance)\n delay = date - self.get_utc_time()\n\n if delay > 0:\n self.feedback_create(date)\n self.add_sync(date, message)\n self.save_sync()\n else:\n self.speak_dialog('schedule.datetime.error')\n\n def feedback_create(self, utc_time):\n self.speak_dialog(\n 'schedule.create', data=self.build_feedback_payload(utc_time))\n\n def add_sync(self, utc_time, message):\n with self.LOCK:\n self.add(utc_time, message)\n\n def add(self, utc_time, message):\n utterance = message.metadata.get('utterance')\n self.data[utc_time] = None\n self.repeat_data[utc_time] = self.time_rules.get_week_days(utterance)\n\n def remove_sync(self, utc_time, add_next=True):\n with self.LOCK:\n val = self.remove(utc_time, add_next)\n return val\n\n def remove(self, utc_time, add_next=True):\n value = self.data.pop(utc_time)\n self.add_next_time(utc_time, value, add_next)\n return value\n\n def add_next_time(self, utc_time, value, add_next=True):\n days = self.repeat_data.pop(utc_time)\n if add_next and days:\n now_time = self.get_utc_time()\n next_time = utc_time + self.ONE_DAY_SECS\n now_day = datetime.fromtimestamp(utc_time).weekday()\n next_day = datetime.fromtimestamp(next_time).weekday()\n while next_day != now_day:\n if days[next_day] and next_time >= now_time:\n self.data[next_time] = value\n self.repeat_data[next_time] = days\n break\n next_time += self.ONE_DAY_SECS\n next_day = datetime.fromtimestamp(next_time).weekday()\n\n def save_sync(self):\n with self.LOCK:\n self.save()\n\n @abc.abstractmethod\n def save(self):\n pass\n\n def handle_list(self, message):\n count = self.get_amount(message)\n if count > 0:\n for key in sorted(self.data.keys()):\n if count > 0:\n self.feedback_list(key)\n count -= 1\n else:\n break\n else:\n self.speak_dialog('schedule.list.empty')\n\n def feedback_list(self, utc_time):\n self.speak_dialog(\n 'schedule.list', data=self.build_feedback_payload(utc_time))\n\n def build_feedback_payload(self, utc_time):\n timestamp = 
self.convert_local(float(utc_time))\n payload = {\n 'data': self.data.get(utc_time),\n 'datetime': self.get_formatted_time(timestamp)\n }\n return payload\n\n def handle_delete(self, message):\n count = self.get_amount(message)\n if count > 0:\n amount = count\n for key in sorted(self.data.keys()):\n if count > 0:\n self.remove_sync(key, False)\n count -= 1\n else:\n break\n self.feedback_delete(amount)\n self.save_sync()\n else:\n self.speak_dialog('schedule.delete.empty')\n\n def feedback_delete(self, amount):\n if amount > 1:\n self.speak_dialog('schedule.delete.many', data={'amount': amount})\n else:\n self.speak_dialog(\n 'schedule.delete.single', data={'amount': amount})\n\n # TODO - Localization\n def get_amount(self, message, default=None):\n size = len(self.data)\n amount = message.metadata.get(self.name + 'Amount', default)\n if amount in ['all', 'my', 'all my', None]:\n total = size\n elif amount in ['one', 'the next', 'the following']:\n total = 1\n elif amount == 'two':\n total = 2\n else:\n total = int(amount)\n return min(total, size)\n", "path": "mycroft/skills/scheduled_skills.py"}]} | 3,013 | 416 |
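The patched `get_formatted_time` above phrases anything within the next 24 hours as a relative duration and only falls back to a configured `strftime` format for later dates. A self-contained sketch of that divmod logic; the fallback format string here is an assumption, since the real skill reads it from `self.config_core`:

```python
# Sketch of the relative-time phrasing introduced by the patch above.
# The strftime fallback below is a stand-in for the skill's configured format.
from datetime import datetime, timedelta

SECONDS_PER_DAY = 86400
SECONDS_PER_HOUR = 3600
SECONDS_PER_MINUTE = 60

def natural_time(timestamp, now=None):
    now = now or datetime.now()
    date = datetime.fromtimestamp(timestamp)
    diff = (date - now).total_seconds()
    if diff <= SECONDS_PER_DAY:
        hours, remainder = divmod(diff, SECONDS_PER_HOUR)
        minutes, seconds = divmod(remainder, SECONDS_PER_MINUTE)
        if hours:
            return "%s hours and %s minutes from now" % (int(hours), int(minutes))
        return "%s minutes and %s seconds from now" % (int(minutes), int(seconds))
    return date.strftime("%A, %B %d at %I:%M %p")

# An alarm five minutes out now reads as roughly
# "4 minutes and 59 seconds from now" rather than a full calendar date.
print(natural_time((datetime.now() + timedelta(minutes=5)).timestamp()))
```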
gh_patches_debug_33517 | rasdani/github-patches | git_diff | UTNkar__moore-65 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check registry on account creation
The registry is currently not checked for membership status when an account is created.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/cron.py`
Content:
```
1 import kronos
2 import requests
3 from django.conf import settings
4 from requests.auth import HTTPDigestAuth
5
6 from members.models import Member
7
8
9 @kronos.register('0 3 * * 1') # At 03:00 on Monday.
10 def update_membership_status():
11 r = requests.get(
12 'https://register.utn.se/api.php',
13 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
14 settings.MEMBERSHIP_API_PASSWORD),
15 params={
16 'action': 'list',
17 },
18 )
19 try:
20 data = r.json()
21 except ValueError:
22 return
23
24 for member in Member.objects.all():
25 if member.person_number().replace('-', '') in data:
26 member.update(data='member')
27 else:
28 member.update_status(data='nonmember')
29 member.save()
30
```
Path: `website/members/signals.py`
Content:
```
1 import datetime
2 from django.conf import settings
3 from django.core.mail import EmailMultiAlternatives
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6 from django.template import loader
7 from django.utils import timezone
8 from simple_email_confirmation import unconfirmed_email_created
9
10 from members.models import Member
11
12
13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
14 def send_confirmation_email(sender, email, user=None, **kwargs):
15 user = user or sender
16 if user is not None:
17 context = {
18 'email': email,
19 'domain': settings.BASE_URL,
20 'site_name': settings.WAGTAIL_SITE_NAME,
21 'token': user.get_confirmation_key(email),
22 }
23
24 subject = loader.render_to_string(
25 'members/email_change_subject.txt', context)
26 # Email subject *must not* contain newlines
27 subject = ''.join(subject.splitlines())
28 body = loader.render_to_string('members/email_change_email.html',
29 context)
30
31 email_message = EmailMultiAlternatives(subject, body, None, [email])
32 email_message.send()
33
34
35 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
36 def check_membership(sender, instance, **kwargs):
37 if timezone.now() - instance.status_changed > datetime.timedelta(1):
38 instance.update_status()
39
```
Path: `website/members/models.py`
Content:
```
1 import requests
2 from django.conf import settings
3 from django.contrib.auth.models import AbstractUser
4 from django.core import validators
5 from django.db import models
6 from django.utils import timezone
7 from django.utils.translation import ugettext_lazy as _
8 from requests.auth import HTTPDigestAuth
9 from simple_email_confirmation.models import SimpleEmailConfirmationUserMixin
10
11 from utils.translation import TranslatedField
12
13
14 class StudyProgram(models.Model):
15 """This class describes a university study program"""
16
17 class Meta:
18 verbose_name = _('study program')
19 verbose_name_plural = _('study programs')
20
21 DEGREE_CHOICES = (
22 ('bachelor', _('Bachelor\'s Degree')),
23 ('master', _('Master\'s Degree')),
24 ('engineer', _('Engineer\'s Degree')),
25 )
26
27 name_en = models.CharField(
28 max_length=255,
29 verbose_name=_('English program name'),
30 help_text=_('Enter the name of the study program'),
31 null=False,
32 blank=False,
33 )
34
35 name_sv = models.CharField(
36 max_length=255,
37 verbose_name=_('Swedish program name'),
38 help_text=_('Enter the name of the study program'),
39 null=False,
40 blank=False,
41 )
42
43 name = TranslatedField('name_en', 'name_sv')
44
45 abbreviation_en = models.CharField(
46 max_length=130,
47 verbose_name=_('English program abbreviation'),
48 help_text=_('Enter the abbreviation for the study program'),
49 null=True,
50 blank=True,
51 )
52
53 abbreviation_sv = models.CharField(
54 max_length=130,
55 verbose_name=_('Swedish program abbreviation'),
56 help_text=_('Enter the abbreviation for the study program'),
57 null=True,
58 blank=True,
59 )
60
61 abbreviation = TranslatedField('name_en', 'name_sv')
62
63 degree = models.CharField(
64 max_length=20,
65 choices=DEGREE_CHOICES,
66 verbose_name=_('Degree type'),
67 blank=False,
68 null=False,
69 )
70
71 def __str__(self) -> str:
72 return '%s in %s' % (self.get_degree_display(), self.name)
73
74
75 class Member(SimpleEmailConfirmationUserMixin, AbstractUser):
76 """This class describes a member"""
77
78 # ---- Personal information ------
79
80 birthday = models.DateField(
81 verbose_name=_('Birthday'),
82 null=True
83 )
84
85 person_number_ext = models.CharField(
86 max_length=4,
87 verbose_name=_('Person number extension'),
88 help_text=_('Enter the last four digits of your Swedish person '
89 'number, given by the Swedish tax authority'),
90 validators=[validators.RegexValidator(
91 regex=r'^\d{4}$',
92 message=_('The person number extension consists of four numbers'),
93 )],
94 unique_for_date="birthday",
95 null=True,
96 blank=True,
97 )
98
99 # ---- Membership information ------
100
101 MEMBERSHIP_CHOICES = (
102 ('unknown', _('Unknown')),
103 ('nonmember', _('Nonmember')),
104 ('member', _('Member')),
105 ('alumnus', _('Alumnus')),
106 )
107
108 status = models.CharField(
109 max_length=20,
110 choices=MEMBERSHIP_CHOICES,
111 verbose_name=_('Membership status'),
112 blank=False,
113 null=False,
114 default='unknown'
115 )
116 status_changed = models.DateTimeField(
117 default=timezone.now,
118 null=False,
119 )
120
121 # ---- Contact information ------
122
123 phone_number = models.CharField(
124 max_length=20,
125 verbose_name=_('Phone number'),
126 help_text=_('Enter a phone number so UTN may reach you'),
127 validators=[validators.RegexValidator(
128 regex=r'^\+?\d+$',
129 message=_('Please enter a valid phone number'),
130 )],
131 null=True,
132 blank=True,
133 )
134
135 # ---- University information ------
136
137 registration_year = models.CharField(
138 max_length=4,
139 verbose_name=_('Registration year'),
140 help_text=_('Enter the year you started studying at the TakNat '
141 'faculty'),
142 validators=[validators.RegexValidator(
143 regex=r'^\d{4}$',
144 message=_('Please enter a valid year')
145 )],
146 null=True,
147 blank=True,
148 )
149
150 study = models.ForeignKey(
151 StudyProgram,
152 on_delete=models.SET_NULL,
153 null=True,
154 blank=True,
155 )
156
157 def __str__(self) -> str:
158 if self.first_name and self.last_name:
159 return '%s %s' % (self.first_name, self.last_name)
160 else:
161 return self.username
162
163 def person_number(self) -> str:
164 if self.birthday is None or self.person_number_ext is None:
165 return ''
166 else:
167 return '%s-%s' % (self.birthday.strftime('%Y%m%d'),
168 self.person_number_ext)
169
170 def update_status(self, data=None):
171 if data is None:
172 r = requests.get(
173 'https://register.utn.se/api.php',
174 auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
175 settings.MEMBERSHIP_API_PASSWORD),
176 params={
177 'action': 'check',
178 'person_number': self.person_number().replace('-', '')
179 },
180 )
181 try:
182 data = r.json().get('status')
183 except ValueError:
184 return
185
186 if data == 'member':
187 self.status = 'member'
188 elif data == 'nonmember':
189 if self.status in ['unknown', 'nonmember']:
190 self.status = 'nonmember'
191 else:
192 self.status = 'alumnus'
193
194 self.status_changed = timezone.now()
195
196 def remove_old_email(self):
197 for email in self.get_unconfirmed_emails() or []:
198 self.remove_email(email)
199 for email in self.get_confirmed_emails():
200 if email != self.email:
201 self.remove_email(email)
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/members/cron.py b/website/members/cron.py
--- a/website/members/cron.py
+++ b/website/members/cron.py
@@ -23,7 +23,9 @@
for member in Member.objects.all():
if member.person_number().replace('-', '') in data:
- member.update(data='member')
+ member.update_status(data='member')
else:
member.update_status(data='nonmember')
- member.save()
+ Member.objects.filter(pk=member.pk).update(
+ status=member.status, status_changed=member.status_changed
+ )
diff --git a/website/members/models.py b/website/members/models.py
--- a/website/members/models.py
+++ b/website/members/models.py
@@ -169,17 +169,21 @@
def update_status(self, data=None):
if data is None:
- r = requests.get(
- 'https://register.utn.se/api.php',
- auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
- settings.MEMBERSHIP_API_PASSWORD),
- params={
- 'action': 'check',
- 'person_number': self.person_number().replace('-', '')
- },
- )
+ if self.person_number() == '':
+ return
try:
+ r = requests.get(
+ 'https://register.utn.se/api.php',
+ auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,
+ settings.MEMBERSHIP_API_PASSWORD),
+ params={
+ 'action': 'check',
+ 'person_number': self.person_number().replace('-', '')
+ },
+ )
data = r.json().get('status')
+ except requests.exceptions.ConnectionError:
+ data = 'unknown'
except ValueError:
return
diff --git a/website/members/signals.py b/website/members/signals.py
--- a/website/members/signals.py
+++ b/website/members/signals.py
@@ -34,5 +34,6 @@
@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
def check_membership(sender, instance, **kwargs):
- if timezone.now() - instance.status_changed > datetime.timedelta(1):
+ if timezone.now() - instance.status_changed > datetime.timedelta(1)\
+ or instance.status == 'unknown':
instance.update_status()
| {"golden_diff": "diff --git a/website/members/cron.py b/website/members/cron.py\n--- a/website/members/cron.py\n+++ b/website/members/cron.py\n@@ -23,7 +23,9 @@\n \n for member in Member.objects.all():\n if member.person_number().replace('-', '') in data:\n- member.update(data='member')\n+ member.update_status(data='member')\n else:\n member.update_status(data='nonmember')\n- member.save()\n+ Member.objects.filter(pk=member.pk).update(\n+ status=member.status, status_changed=member.status_changed\n+ )\ndiff --git a/website/members/models.py b/website/members/models.py\n--- a/website/members/models.py\n+++ b/website/members/models.py\n@@ -169,17 +169,21 @@\n \n def update_status(self, data=None):\n if data is None:\n- r = requests.get(\n- 'https://register.utn.se/api.php',\n- auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n- settings.MEMBERSHIP_API_PASSWORD),\n- params={\n- 'action': 'check',\n- 'person_number': self.person_number().replace('-', '')\n- },\n- )\n+ if self.person_number() == '':\n+ return\n try:\n+ r = requests.get(\n+ 'https://register.utn.se/api.php',\n+ auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n+ settings.MEMBERSHIP_API_PASSWORD),\n+ params={\n+ 'action': 'check',\n+ 'person_number': self.person_number().replace('-', '')\n+ },\n+ )\n data = r.json().get('status')\n+ except requests.exceptions.ConnectionError:\n+ data = 'unknown'\n except ValueError:\n return\n \ndiff --git a/website/members/signals.py b/website/members/signals.py\n--- a/website/members/signals.py\n+++ b/website/members/signals.py\n@@ -34,5 +34,6 @@\n \n @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\n def check_membership(sender, instance, **kwargs):\n- if timezone.now() - instance.status_changed > datetime.timedelta(1):\n+ if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n+ or instance.status == 'unknown':\n instance.update_status()\n", "issue": "Check registry on account creation\nThe registry is currently not checked for membership status when an account is created.\n", "before_files": [{"content": "import kronos\nimport requests\nfrom django.conf import settings\nfrom requests.auth import HTTPDigestAuth\n\nfrom members.models import Member\n\n\[email protected]('0 3 * * 1') # At 03:00 on Monday.\ndef update_membership_status():\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'list',\n },\n )\n try:\n data = r.json()\n except ValueError:\n return\n\n for member in Member.objects.all():\n if member.person_number().replace('-', '') in data:\n member.update(data='member')\n else:\n member.update_status(data='nonmember')\n member.save()\n", "path": "website/members/cron.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 
'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n", "path": "website/members/signals.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bachelor', _('Bachelor\\'s Degree')),\n ('master', _('Master\\'s Degree')),\n ('engineer', _('Engineer\\'s Degree')),\n )\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation_en = models.CharField(\n max_length=130,\n verbose_name=_('English program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation_sv = models.CharField(\n max_length=130,\n verbose_name=_('Swedish program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=False,\n null=False,\n )\n\n def __str__(self) -> str:\n return '%s in %s' % (self.get_degree_display(), self.name)\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n null=True,\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n blank=False,\n null=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n 
null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n null=True,\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n null=True,\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def person_number(self) -> str:\n if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n try:\n data = r.json().get('status')\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "website/members/models.py"}], "after_files": [{"content": "import kronos\nimport requests\nfrom django.conf import settings\nfrom requests.auth import HTTPDigestAuth\n\nfrom members.models import Member\n\n\[email protected]('0 3 * * 1') # At 03:00 on Monday.\ndef update_membership_status():\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'list',\n },\n )\n try:\n data = r.json()\n except ValueError:\n return\n\n for member in Member.objects.all():\n if member.person_number().replace('-', '') in data:\n member.update_status(data='member')\n else:\n member.update_status(data='nonmember')\n Member.objects.filter(pk=member.pk).update(\n status=member.status, status_changed=member.status_changed\n )\n", "path": "website/members/cron.py"}, {"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': 
user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1)\\\n or instance.status == 'unknown':\n instance.update_status()\n", "path": "website/members/signals.py"}, {"content": "import requests\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core import validators\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom requests.auth import HTTPDigestAuth\nfrom simple_email_confirmation.models import SimpleEmailConfirmationUserMixin\n\nfrom utils.translation import TranslatedField\n\n\nclass StudyProgram(models.Model):\n \"\"\"This class describes a university study program\"\"\"\n\n class Meta:\n verbose_name = _('study program')\n verbose_name_plural = _('study programs')\n\n DEGREE_CHOICES = (\n ('bachelor', _('Bachelor\\'s Degree')),\n ('master', _('Master\\'s Degree')),\n ('engineer', _('Engineer\\'s Degree')),\n )\n\n name_en = models.CharField(\n max_length=255,\n verbose_name=_('English program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name_sv = models.CharField(\n max_length=255,\n verbose_name=_('Swedish program name'),\n help_text=_('Enter the name of the study program'),\n null=False,\n blank=False,\n )\n\n name = TranslatedField('name_en', 'name_sv')\n\n abbreviation_en = models.CharField(\n max_length=130,\n verbose_name=_('English program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation_sv = models.CharField(\n max_length=130,\n verbose_name=_('Swedish program abbreviation'),\n help_text=_('Enter the abbreviation for the study program'),\n null=True,\n blank=True,\n )\n\n abbreviation = TranslatedField('name_en', 'name_sv')\n\n degree = models.CharField(\n max_length=20,\n choices=DEGREE_CHOICES,\n verbose_name=_('Degree type'),\n blank=False,\n null=False,\n )\n\n def __str__(self) -> str:\n return '%s in %s' % (self.get_degree_display(), self.name)\n\n\nclass Member(SimpleEmailConfirmationUserMixin, AbstractUser):\n \"\"\"This class describes a member\"\"\"\n\n # ---- Personal information ------\n\n birthday = models.DateField(\n verbose_name=_('Birthday'),\n null=True\n )\n\n person_number_ext = models.CharField(\n max_length=4,\n verbose_name=_('Person number extension'),\n help_text=_('Enter the last four digits of your Swedish person '\n 'number, given by the Swedish tax authority'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('The person number extension consists of four numbers'),\n )],\n unique_for_date=\"birthday\",\n null=True,\n blank=True,\n )\n\n # ---- Membership information ------\n\n MEMBERSHIP_CHOICES = (\n ('unknown', _('Unknown')),\n ('nonmember', _('Nonmember')),\n ('member', _('Member')),\n ('alumnus', _('Alumnus')),\n )\n\n status = models.CharField(\n max_length=20,\n choices=MEMBERSHIP_CHOICES,\n verbose_name=_('Membership status'),\n 
blank=False,\n null=False,\n default='unknown'\n )\n status_changed = models.DateTimeField(\n default=timezone.now,\n null=False,\n )\n\n # ---- Contact information ------\n\n phone_number = models.CharField(\n max_length=20,\n verbose_name=_('Phone number'),\n help_text=_('Enter a phone number so UTN may reach you'),\n validators=[validators.RegexValidator(\n regex=r'^\\+?\\d+$',\n message=_('Please enter a valid phone number'),\n )],\n null=True,\n blank=True,\n )\n\n # ---- University information ------\n\n registration_year = models.CharField(\n max_length=4,\n verbose_name=_('Registration year'),\n help_text=_('Enter the year you started studying at the TakNat '\n 'faculty'),\n validators=[validators.RegexValidator(\n regex=r'^\\d{4}$',\n message=_('Please enter a valid year')\n )],\n null=True,\n blank=True,\n )\n\n study = models.ForeignKey(\n StudyProgram,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n\n def __str__(self) -> str:\n if self.first_name and self.last_name:\n return '%s %s' % (self.first_name, self.last_name)\n else:\n return self.username\n\n def person_number(self) -> str:\n if self.birthday is None or self.person_number_ext is None:\n return ''\n else:\n return '%s-%s' % (self.birthday.strftime('%Y%m%d'),\n self.person_number_ext)\n\n def update_status(self, data=None):\n if data is None:\n if self.person_number() == '':\n return\n try:\n r = requests.get(\n 'https://register.utn.se/api.php',\n auth=HTTPDigestAuth(settings.MEMBERSHIP_API_USER,\n settings.MEMBERSHIP_API_PASSWORD),\n params={\n 'action': 'check',\n 'person_number': self.person_number().replace('-', '')\n },\n )\n data = r.json().get('status')\n except requests.exceptions.ConnectionError:\n data = 'unknown'\n except ValueError:\n return\n\n if data == 'member':\n self.status = 'member'\n elif data == 'nonmember':\n if self.status in ['unknown', 'nonmember']:\n self.status = 'nonmember'\n else:\n self.status = 'alumnus'\n\n self.status_changed = timezone.now()\n\n def remove_old_email(self):\n for email in self.get_unconfirmed_emails() or []:\n self.remove_email(email)\n for email in self.get_confirmed_emails():\n if email != self.email:\n self.remove_email(email)\n", "path": "website/members/models.py"}]} | 2,604 | 527 |
gh_patches_debug_12893 | rasdani/github-patches | git_diff | scrapy__scrapy-4420 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
follow_all fails with an empty list of URLs
<!--
Thanks for taking an interest in Scrapy!
If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/.
The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.
Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md
The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs
-->
### Description
`follow_all` with an empty list of urls fails with `ValueError('Please supply exactly one of the following arguments: urls, css, xpath')`
What I would expect instead is just an empty generator of requests.
### Steps to Reproduce
```py
class Spider(scrapy.Spider):
def parse(self, response):
yield from response.follow_all([], self.parse)
```
**Expected behavior:**
No error is raised
**Actual behavior:**
`ValueError('Please supply exactly one of the following arguments: urls, css, xpath')` exception is raised.
### Versions
2.0
### Additional context
I think the solution is just a matter of changing this line: https://github.com/scrapy/scrapy/blob/master/scrapy/http/response/text.py#L191
--- END ISSUE ---
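To make the failure mode concrete, here is a small standalone sketch (not part of the Scrapy code base; the helper names are invented for illustration) of why the current check conflates an empty list with a missing argument, and what a `None`-aware check looks like instead:

```python
# Illustrative only: mirrors the argument check inside TextResponse.follow_all().
def current_check(urls=None, css=None, xpath=None):
    # filter(None, ...) drops every falsy value, so urls=[] is treated as "not supplied".
    return len(list(filter(None, (urls, css, xpath)))) == 1

def none_aware_check(urls=None, css=None, xpath=None):
    # Only None means "not supplied"; an empty list is still a valid urls argument.
    return len([x for x in (urls, css, xpath) if x is not None]) == 1

assert current_check(urls=[]) is False    # current behaviour: ValueError is raised
assert none_aware_check(urls=[]) is True  # expected behaviour: simply yield no requests
```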
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/http/response/text.py`
Content:
```
1 """
2 This module implements the TextResponse class which adds encoding handling and
3 discovering (through HTTP headers) to base Response class.
4
5 See documentation in docs/topics/request-response.rst
6 """
7
8 from contextlib import suppress
9 from typing import Generator
10 from urllib.parse import urljoin
11
12 import parsel
13 from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
14 http_content_type_encoding, resolve_encoding)
15 from w3lib.html import strip_html5_whitespace
16
17 from scrapy.http import Request
18 from scrapy.http.response import Response
19 from scrapy.utils.python import memoizemethod_noargs, to_unicode
20 from scrapy.utils.response import get_base_url
21
22
23 class TextResponse(Response):
24
25 _DEFAULT_ENCODING = 'ascii'
26
27 def __init__(self, *args, **kwargs):
28 self._encoding = kwargs.pop('encoding', None)
29 self._cached_benc = None
30 self._cached_ubody = None
31 self._cached_selector = None
32 super(TextResponse, self).__init__(*args, **kwargs)
33
34 def _set_url(self, url):
35 if isinstance(url, str):
36 self._url = to_unicode(url, self.encoding)
37 else:
38 super(TextResponse, self)._set_url(url)
39
40 def _set_body(self, body):
41 self._body = b'' # used by encoding detection
42 if isinstance(body, str):
43 if self._encoding is None:
44 raise TypeError('Cannot convert unicode body - %s has no encoding' %
45 type(self).__name__)
46 self._body = body.encode(self._encoding)
47 else:
48 super(TextResponse, self)._set_body(body)
49
50 def replace(self, *args, **kwargs):
51 kwargs.setdefault('encoding', self.encoding)
52 return Response.replace(self, *args, **kwargs)
53
54 @property
55 def encoding(self):
56 return self._declared_encoding() or self._body_inferred_encoding()
57
58 def _declared_encoding(self):
59 return self._encoding or self._headers_encoding() \
60 or self._body_declared_encoding()
61
62 def body_as_unicode(self):
63 """Return body as unicode"""
64 return self.text
65
66 @property
67 def text(self):
68 """ Body as unicode """
69 # access self.encoding before _cached_ubody to make sure
70 # _body_inferred_encoding is called
71 benc = self.encoding
72 if self._cached_ubody is None:
73 charset = 'charset=%s' % benc
74 self._cached_ubody = html_to_unicode(charset, self.body)[1]
75 return self._cached_ubody
76
77 def urljoin(self, url):
78 """Join this Response's url with a possible relative url to form an
79 absolute interpretation of the latter."""
80 return urljoin(get_base_url(self), url)
81
82 @memoizemethod_noargs
83 def _headers_encoding(self):
84 content_type = self.headers.get(b'Content-Type', b'')
85 return http_content_type_encoding(to_unicode(content_type))
86
87 def _body_inferred_encoding(self):
88 if self._cached_benc is None:
89 content_type = to_unicode(self.headers.get(b'Content-Type', b''))
90 benc, ubody = html_to_unicode(content_type, self.body,
91 auto_detect_fun=self._auto_detect_fun,
92 default_encoding=self._DEFAULT_ENCODING)
93 self._cached_benc = benc
94 self._cached_ubody = ubody
95 return self._cached_benc
96
97 def _auto_detect_fun(self, text):
98 for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
99 try:
100 text.decode(enc)
101 except UnicodeError:
102 continue
103 return resolve_encoding(enc)
104
105 @memoizemethod_noargs
106 def _body_declared_encoding(self):
107 return html_body_declared_encoding(self.body)
108
109 @property
110 def selector(self):
111 from scrapy.selector import Selector
112 if self._cached_selector is None:
113 self._cached_selector = Selector(self)
114 return self._cached_selector
115
116 def xpath(self, query, **kwargs):
117 return self.selector.xpath(query, **kwargs)
118
119 def css(self, query):
120 return self.selector.css(query)
121
122 def follow(self, url, callback=None, method='GET', headers=None, body=None,
123 cookies=None, meta=None, encoding=None, priority=0,
124 dont_filter=False, errback=None, cb_kwargs=None, flags=None):
125 # type: (...) -> Request
126 """
127 Return a :class:`~.Request` instance to follow a link ``url``.
128 It accepts the same arguments as ``Request.__init__`` method,
129 but ``url`` can be not only an absolute URL, but also
130
131 * a relative URL
132 * a :class:`~scrapy.link.Link` object, e.g. the result of
133 :ref:`topics-link-extractors`
134 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
135 ``response.css('a.my_link')[0]``
136 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
137 ``response.css('a::attr(href)')[0]`` or
138 ``response.xpath('//img/@src')[0]``
139
140 See :ref:`response-follow-example` for usage examples.
141 """
142 if isinstance(url, parsel.Selector):
143 url = _url_from_selector(url)
144 elif isinstance(url, parsel.SelectorList):
145 raise ValueError("SelectorList is not supported")
146 encoding = self.encoding if encoding is None else encoding
147 return super(TextResponse, self).follow(
148 url=url,
149 callback=callback,
150 method=method,
151 headers=headers,
152 body=body,
153 cookies=cookies,
154 meta=meta,
155 encoding=encoding,
156 priority=priority,
157 dont_filter=dont_filter,
158 errback=errback,
159 cb_kwargs=cb_kwargs,
160 flags=flags,
161 )
162
163 def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,
164 cookies=None, meta=None, encoding=None, priority=0,
165 dont_filter=False, errback=None, cb_kwargs=None, flags=None,
166 css=None, xpath=None):
167 # type: (...) -> Generator[Request, None, None]
168 """
169 A generator that produces :class:`~.Request` instances to follow all
170 links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
171 ``__init__`` method, except that each ``urls`` element does not need to be
172 an absolute URL, it can be any of the following:
173
174 * a relative URL
175 * a :class:`~scrapy.link.Link` object, e.g. the result of
176 :ref:`topics-link-extractors`
177 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
178 ``response.css('a.my_link')[0]``
179 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
180 ``response.css('a::attr(href)')[0]`` or
181 ``response.xpath('//img/@src')[0]``
182
183 In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
184 within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).
185
186 Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
187 using the ``css`` or ``xpath`` parameters, this method will not produce requests for
188 selectors from which links cannot be obtained (for instance, anchor tags without an
189 ``href`` attribute)
190 """
191 arg_count = len(list(filter(None, (urls, css, xpath))))
192 if arg_count != 1:
193 raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')
194 if not urls:
195 if css:
196 urls = self.css(css)
197 if xpath:
198 urls = self.xpath(xpath)
199 if isinstance(urls, parsel.SelectorList):
200 selectors = urls
201 urls = []
202 for sel in selectors:
203 with suppress(_InvalidSelector):
204 urls.append(_url_from_selector(sel))
205 return super(TextResponse, self).follow_all(
206 urls=urls,
207 callback=callback,
208 method=method,
209 headers=headers,
210 body=body,
211 cookies=cookies,
212 meta=meta,
213 encoding=encoding,
214 priority=priority,
215 dont_filter=dont_filter,
216 errback=errback,
217 cb_kwargs=cb_kwargs,
218 flags=flags,
219 )
220
221
222 class _InvalidSelector(ValueError):
223 """
224 Raised when a URL cannot be obtained from a Selector
225 """
226
227
228 def _url_from_selector(sel):
229 # type: (parsel.Selector) -> str
230 if isinstance(sel.root, str):
231 # e.g. ::attr(href) result
232 return strip_html5_whitespace(sel.root)
233 if not hasattr(sel.root, 'tag'):
234 raise _InvalidSelector("Unsupported selector: %s" % sel)
235 if sel.root.tag not in ('a', 'link'):
236 raise _InvalidSelector("Only <a> and <link> elements are supported; got <%s>" %
237 sel.root.tag)
238 href = sel.root.get('href')
239 if href is None:
240 raise _InvalidSelector("<%s> element has no href attribute: %s" %
241 (sel.root.tag, sel))
242 return strip_html5_whitespace(href)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py
--- a/scrapy/http/response/text.py
+++ b/scrapy/http/response/text.py
@@ -188,9 +188,11 @@
selectors from which links cannot be obtained (for instance, anchor tags without an
``href`` attribute)
"""
- arg_count = len(list(filter(None, (urls, css, xpath))))
- if arg_count != 1:
- raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')
+ arguments = [x for x in (urls, css, xpath) if x is not None]
+ if len(arguments) != 1:
+ raise ValueError(
+ "Please supply exactly one of the following arguments: urls, css, xpath"
+ )
if not urls:
if css:
urls = self.css(css)
| {"golden_diff": "diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py\n--- a/scrapy/http/response/text.py\n+++ b/scrapy/http/response/text.py\n@@ -188,9 +188,11 @@\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n- arg_count = len(list(filter(None, (urls, css, xpath))))\n- if arg_count != 1:\n- raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')\n+ arguments = [x for x in (urls, css, xpath) if x is not None]\n+ if len(arguments) != 1:\n+ raise ValueError(\n+ \"Please supply exactly one of the following arguments: urls, css, xpath\"\n+ )\n if not urls:\n if css:\n urls = self.css(css)\n", "issue": "follow_all fails with an empty list of URLs\n<!--\r\n\r\nThanks for taking an interest in Scrapy!\r\n\r\nIf you have a question that starts with \"How to...\", please see the Scrapy Community page: https://scrapy.org/community/.\r\nThe GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.\r\n\r\nKeep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md\r\n\r\nThe following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs\r\n\r\n-->\r\n\r\n### Description\r\n\r\n`follow_all` with an empty list of urls fails with `ValueError('Please supply exactly one of the following arguments: urls, css, xpath')`\r\n\r\nWhat I would expect instead is just an empty generator of requests. \r\n\r\n### Steps to Reproduce\r\n\r\n```py\r\nclass Spider(scrapy.Spider):\r\n\r\n def parse(self, response):\r\n yield from response.follow_all([], self.parse)\r\n```\r\n\r\n**Expected behavior:** \r\n\r\nNo error is raised\r\n\r\n**Actual behavior:**\r\n\r\n`ValueError('Please supply exactly one of the following arguments: urls, css, xpath')` exception is raised. 
\r\n\r\n\r\n### Versions\r\n\r\n2.0\r\n\r\n### Additional context\r\n\r\nI think the solution is just a matter of changing this line: https://github.com/scrapy/scrapy/blob/master/scrapy/http/response/text.py#L191\r\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def 
selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arg_count = len(list(filter(None, (urls, css, xpath))))\n if arg_count != 1:\n raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath')\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. ::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}], "after_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n 
if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. 
::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}]} | 3,267 | 196 |
gh_patches_debug_37135 | rasdani/github-patches | git_diff | sopel-irc__sopel-843 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strip leading . on .tld queries
`.tld .py` should be the same as `.tld py`
--- END ISSUE ---
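For illustration, a minimal standalone sketch of the normalisation being requested (the helper name is invented and is not part of the module): strip a single leading dot from the query before it is used for the lookup.

```python
# Illustrative only: normalise the TLD query so ".py" and "py" behave the same.
def normalize_tld(query: str) -> str:
    if query.startswith('.'):
        return query[1:]  # drop exactly one leading dot
    return query

assert normalize_tld('.py') == 'py'
assert normalize_tld('py') == 'py'
assert normalize_tld('.co.uk') == 'co.uk'  # only the first dot is removed
```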
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `willie/modules/tld.py`
Content:
```
1 # coding=utf8
2 """
3 tld.py - Willie TLD Module
4 Copyright 2009-10, Michael Yanovich, yanovich.net
5 Licensed under the Eiffel Forum License 2.
6
7 http://willie.dftba.net
8 """
9 from __future__ import unicode_literals
10
11 from willie import web
12 from willie.module import commands, example
13 import re
14 import sys
15 if sys.version_info.major >= 3:
16 unicode = str
17
18 uri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'
19 r_tag = re.compile(r'<(?!!)[^>]+>')
20
21
22 @commands('tld')
23 @example('.tld ru')
24 def gettld(bot, trigger):
25 """Show information about the given Top Level Domain."""
26 page = web.get(uri)
27 search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
28 search = search.format(trigger.group(2))
29 re_country = re.compile(search)
30 matches = re_country.findall(page)
31 if not matches:
32 search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
33 search = search.format(trigger.group(2))
34 re_country = re.compile(search)
35 matches = re_country.findall(page)
36 if matches:
37 matches = list(matches[0])
38 i = 0
39 while i < len(matches):
40 matches[i] = r_tag.sub("", matches[i])
41 i += 1
42 desc = matches[2]
43 if len(desc) > 400:
44 desc = desc[:400] + "..."
45 reply = "%s -- %s. IDN: %s, DNSSEC: %s" % (matches[1], desc,
46 matches[3], matches[4])
47 bot.reply(reply)
48 else:
49 search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
50 search = search.format(unicode(trigger.group(2)))
51 re_country = re.compile(search)
52 matches = re_country.findall(page)
53 if matches:
54 matches = matches[0]
55 dict_val = dict()
56 dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"] = matches
57 for key in dict_val:
58 if dict_val[key] == " ":
59 dict_val[key] = "N/A"
60 dict_val[key] = r_tag.sub('', dict_val[key])
61 if len(dict_val["notes"]) > 400:
62 dict_val["notes"] = dict_val["notes"][:400] + "..."
63 reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
64 else:
65 reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
66 bot.reply(reply)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/willie/modules/tld.py b/willie/modules/tld.py
--- a/willie/modules/tld.py
+++ b/willie/modules/tld.py
@@ -24,13 +24,16 @@
def gettld(bot, trigger):
"""Show information about the given Top Level Domain."""
page = web.get(uri)
+ tld = trigger.group(2)
+ if tld[0] == '.':
+ tld = tld[1:]
search = r'(?i)<td><a href="\S+" title="\S+">\.{0}</a></td>\n(<td><a href=".*</a></td>\n)?<td>([A-Za-z0-9].*?)</td>\n<td>(.*)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if not matches:
search = r'(?i)<td><a href="\S+" title="(\S+)">\.{0}</a></td>\n<td><a href=".*">(.*)</a></td>\n<td>([A-Za-z0-9].*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(trigger.group(2))
+ search = search.format(tld)
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -47,7 +50,7 @@
bot.reply(reply)
else:
search = r'<td><a href="\S+" title="\S+">.{0}</a></td>\n<td><span class="flagicon"><img.*?\">(.*?)</a></td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n<td[^>]*>(.*?)</td>\n'
- search = search.format(unicode(trigger.group(2)))
+ search = search.format(unicode(tld))
re_country = re.compile(search)
matches = re_country.findall(page)
if matches:
@@ -62,5 +65,5 @@
dict_val["notes"] = dict_val["notes"][:400] + "..."
reply = "%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s" % (dict_val["country"], dict_val["expl"], dict_val["notes"], dict_val["idn"], dict_val["dnssec"], dict_val["sld"])
else:
- reply = "No matches found for TLD: {0}".format(unicode(trigger.group(2)))
+ reply = "No matches found for TLD: {0}".format(unicode(tld))
bot.reply(reply)
| {"golden_diff": "diff --git a/willie/modules/tld.py b/willie/modules/tld.py\n--- a/willie/modules/tld.py\n+++ b/willie/modules/tld.py\n@@ -24,13 +24,16 @@\n def gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n+ tld = trigger.group(2)\n+ if tld[0] == '.':\n+ tld = tld[1:]\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(trigger.group(2))\n+ search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -47,7 +50,7 @@\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n- search = search.format(unicode(trigger.group(2)))\n+ search = search.format(unicode(tld))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n@@ -62,5 +65,5 @@\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n- reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n+ reply = \"No matches found for TLD: {0}\".format(unicode(tld))\n bot.reply(reply)\n", "issue": "Strip leading . on .tld queries\n`.tld .py` should be the same as `.tld py`\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\ntld.py - Willie TLD Module\nCopyright 2009-10, Michael Yanovich, yanovich.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom willie import web\nfrom willie.module import commands, example\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n\nuri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'\nr_tag = re.compile(r'<(?!!)[^>]+>')\n\n\n@commands('tld')\n@example('.tld ru')\ndef gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(trigger.group(2))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = list(matches[0])\n i = 0\n while i < len(matches):\n matches[i] = r_tag.sub(\"\", matches[i])\n i += 1\n desc = matches[2]\n if len(desc) > 400:\n desc = desc[:400] + \"...\"\n reply = \"%s -- %s. 
IDN: %s, DNSSEC: %s\" % (matches[1], desc,\n matches[3], matches[4])\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(unicode(trigger.group(2)))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = matches[0]\n dict_val = dict()\n dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"] = matches\n for key in dict_val:\n if dict_val[key] == \" \":\n dict_val[key] = \"N/A\"\n dict_val[key] = r_tag.sub('', dict_val[key])\n if len(dict_val[\"notes\"]) > 400:\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n reply = \"No matches found for TLD: {0}\".format(unicode(trigger.group(2)))\n bot.reply(reply)\n", "path": "willie/modules/tld.py"}], "after_files": [{"content": "# coding=utf8\n\"\"\"\ntld.py - Willie TLD Module\nCopyright 2009-10, Michael Yanovich, yanovich.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom willie import web\nfrom willie.module import commands, example\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n\nuri = 'https://en.wikipedia.org/wiki/List_of_Internet_top-level_domains'\nr_tag = re.compile(r'<(?!!)[^>]+>')\n\n\n@commands('tld')\n@example('.tld ru')\ndef gettld(bot, trigger):\n \"\"\"Show information about the given Top Level Domain.\"\"\"\n page = web.get(uri)\n tld = trigger.group(2)\n if tld[0] == '.':\n tld = tld[1:]\n search = r'(?i)<td><a href=\"\\S+\" title=\"\\S+\">\\.{0}</a></td>\\n(<td><a href=\".*</a></td>\\n)?<td>([A-Za-z0-9].*?)</td>\\n<td>(.*)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if not matches:\n search = r'(?i)<td><a href=\"\\S+\" title=\"(\\S+)\">\\.{0}</a></td>\\n<td><a href=\".*\">(.*)</a></td>\\n<td>([A-Za-z0-9].*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(tld)\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = list(matches[0])\n i = 0\n while i < len(matches):\n matches[i] = r_tag.sub(\"\", matches[i])\n i += 1\n desc = matches[2]\n if len(desc) > 400:\n desc = desc[:400] + \"...\"\n reply = \"%s -- %s. 
IDN: %s, DNSSEC: %s\" % (matches[1], desc,\n matches[3], matches[4])\n bot.reply(reply)\n else:\n search = r'<td><a href=\"\\S+\" title=\"\\S+\">.{0}</a></td>\\n<td><span class=\"flagicon\"><img.*?\\\">(.*?)</a></td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n<td[^>]*>(.*?)</td>\\n'\n search = search.format(unicode(tld))\n re_country = re.compile(search)\n matches = re_country.findall(page)\n if matches:\n matches = matches[0]\n dict_val = dict()\n dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"] = matches\n for key in dict_val:\n if dict_val[key] == \" \":\n dict_val[key] = \"N/A\"\n dict_val[key] = r_tag.sub('', dict_val[key])\n if len(dict_val[\"notes\"]) > 400:\n dict_val[\"notes\"] = dict_val[\"notes\"][:400] + \"...\"\n reply = \"%s (%s, %s). IDN: %s, DNSSEC: %s, SLD: %s\" % (dict_val[\"country\"], dict_val[\"expl\"], dict_val[\"notes\"], dict_val[\"idn\"], dict_val[\"dnssec\"], dict_val[\"sld\"])\n else:\n reply = \"No matches found for TLD: {0}\".format(unicode(tld))\n bot.reply(reply)\n", "path": "willie/modules/tld.py"}]} | 1,277 | 668 |
gh_patches_debug_25153 | rasdani/github-patches | git_diff | pytorch__pytorch-1404 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dropout documentation is incorrect
"Randomly zeroes some of the elements of the input tensor. The elements to zero are randomized on every forward call."
This is incorrect; the function also scales up by 1/(1-p), which the implementation correctly does.
--- END ISSUE ---
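The scaling behaviour described above can be checked directly. A small standalone sketch (assuming a reasonably recent PyTorch build; it uses the plain tensor API rather than the `autograd.Variable` API from the era of this issue):

```python
# Illustrative only: inverted dropout scales surviving activations by 1/(1-p).
import torch
import torch.nn.functional as F

p = 0.5
x = torch.ones(10000)

y = F.dropout(x, p=p, training=True)
# During training every element is either zeroed or scaled to 1 / (1 - p) == 2.0.
assert set(y.unique().tolist()) <= {0.0, 1.0 / (1.0 - p)}

# In evaluation mode dropout is the identity, so no rescaling is needed at test time.
assert torch.equal(F.dropout(x, p=p, training=False), x)
```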
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/nn/modules/dropout.py`
Content:
```
1 from .module import Module
2 from .. import functional as F
3
4
5 class Dropout(Module):
6 r"""Randomly zeroes some of the elements of the input tensor.
7 The elements to zero are randomized on every forward call.
8
9 Args:
10 p: probability of an element to be zeroed. Default: 0.5
11 inplace: If set to True, will do this operation in-place. Default: false
12
13 Shape:
14 - Input: `Any`. Input can be of any shape
15 - Output: `Same`. Output is of the same shape as input
16
17 Examples::
18
19 >>> m = nn.Dropout(p=0.2)
20 >>> input = autograd.Variable(torch.randn(20, 16))
21 >>> output = m(input)
22 """
23
24 def __init__(self, p=0.5, inplace=False):
25 super(Dropout, self).__init__()
26 if p < 0 or p > 1:
27 raise ValueError("dropout probability has to be between 0 and 1, "
28 "but got {}".format(p))
29 self.p = p
30 self.inplace = inplace
31
32 def forward(self, input):
33 return F.dropout(input, self.p, self.training, self.inplace)
34
35 def __repr__(self):
36 inplace_str = ', inplace' if self.inplace else ''
37 return self.__class__.__name__ + ' (' \
38 + 'p = ' + str(self.p) \
39 + inplace_str + ')'
40
41
42 class Dropout2d(Module):
43 r"""Randomly zeroes whole channels of the input tensor.
44 The channels to zero-out are randomized on every forward call.
45
46 *Usually the input comes from Conv2d modules.*
47
48 As described in the paper
49 `Efficient Object Localization Using Convolutional Networks`_ ,
50 if adjacent pixels within feature maps are strongly correlated
51 (as is normally the case in early convolution layers) then iid dropout
52 will not regularize the activations and will otherwise just result
53 in an effective learning rate decrease.
54
55 In this case, :func:`nn.Dropout2d` will help promote independence between
56 feature maps and should be used instead.
57
58 Args:
59 p (float, optional): probability of an element to be zeroed.
60 inplace (bool, optional): If set to True, will do this operation in-place
61
62 Shape:
63 - Input: :math:`(N, C, H, W)`
64 - Output: :math:`(N, C, H, W)` (same shape as input)
65
66 Examples::
67
68 >>> m = nn.Dropout2d(p=0.2)
69 >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))
70 >>> output = m(input)
71
72 .. _Efficient Object Localization Using Convolutional Networks:
73 http://arxiv.org/abs/1411.4280
74 """
75
76 def __init__(self, p=0.5, inplace=False):
77 super(Dropout2d, self).__init__()
78 if p < 0 or p > 1:
79 raise ValueError("dropout probability has to be between 0 and 1, "
80 "but got {}".format(p))
81 self.p = p
82 self.inplace = inplace
83
84 def forward(self, input):
85 return self._backend.Dropout2d(self.p, self.training, self.inplace)(input)
86
87 def __repr__(self):
88 inplace_str = ', inplace' if self.inplace else ''
89 return self.__class__.__name__ + ' (' \
90 + 'p=' + str(self.p) \
91 + inplace_str + ')'
92
93
94 class Dropout3d(Module):
95 r"""Randomly zeroes whole channels of the input tensor.
96 The channels to zero are randomized on every forward call.
97
98 *Usually the input comes from Conv3d modules.*
99
100 As described in the paper
101 `Efficient Object Localization Using Convolutional Networks`_ ,
102 if adjacent pixels within feature maps are strongly correlated
103 (as is normally the case in early convolution layers) then iid dropout
104 will not regularize the activations and will otherwise just result
105 in an effective learning rate decrease.
106
107 In this case, :func:`nn.Dropout3d` will help promote independence between
108 feature maps and should be used instead.
109
110 Args:
111 p (float, optional): probability of an element to be zeroed.
112 inplace (bool, optional): If set to True, will do this operation in-place
113
114 Shape:
115 - Input: :math:`(N, C, D, H, W)`
116 - Output: :math:`(N, C, D, H, W)` (same shape as input)
117
118 Examples::
119
120 >>> m = nn.Dropout3d(p=0.2)
121 >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32))
122 >>> output = m(input)
123
124 .. _Efficient Object Localization Using Convolutional Networks:
125 http://arxiv.org/abs/1411.4280
126 """
127
128 def __init__(self, p=0.5, inplace=False):
129 super(Dropout3d, self).__init__()
130 if p < 0 or p > 1:
131 raise ValueError("dropout probability has to be between 0 and 1, "
132 "but got {}".format(p))
133 self.p = p
134 self.inplace = inplace
135
136 def forward(self, input):
137 return self._backend.Dropout3d(self.p, self.training, self.inplace)(input)
138
139 def __repr__(self):
140 inplace_str = ', inplace' if self.inplace else ''
141 return self.__class__.__name__ + ' (' \
142 + 'p=' + str(self.p) \
143 + inplace_str + ')'
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py
--- a/torch/nn/modules/dropout.py
+++ b/torch/nn/modules/dropout.py
@@ -3,9 +3,19 @@
class Dropout(Module):
- r"""Randomly zeroes some of the elements of the input tensor.
+ r"""During training, randomly zeroes some of the elements of the input
+ tensor with probability *p* using samples from a bernoulli distribution.
The elements to zero are randomized on every forward call.
+ This has proven to be an effective technique for regularization and
+ preventing the co-adaptation of neurons as described in the paper
+ `Improving neural networks by preventing co-adaptation of feature
+ detectors`_ .
+
+ Furthermore, the outputs are scaled by a factor of *1/(1-p)* during
+ training. This means that during evaluation the module simply computes an
+ identity function.
+
Args:
p: probability of an element to be zeroed. Default: 0.5
inplace: If set to True, will do this operation in-place. Default: false
@@ -19,6 +29,8 @@
>>> m = nn.Dropout(p=0.2)
>>> input = autograd.Variable(torch.randn(20, 16))
>>> output = m(input)
+
+ .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580
"""
def __init__(self, p=0.5, inplace=False):
| {"golden_diff": "diff --git a/torch/nn/modules/dropout.py b/torch/nn/modules/dropout.py\n--- a/torch/nn/modules/dropout.py\n+++ b/torch/nn/modules/dropout.py\n@@ -3,9 +3,19 @@\n \n \n class Dropout(Module):\n- r\"\"\"Randomly zeroes some of the elements of the input tensor.\n+ r\"\"\"During training, randomly zeroes some of the elements of the input\n+ tensor with probability *p* using samples from a bernoulli distribution.\n The elements to zero are randomized on every forward call.\n \n+ This has proven to be an effective technique for regularization and\n+ preventing the co-adaptation of neurons as described in the paper\n+ `Improving neural networks by preventing co-adaptation of feature\n+ detectors`_ .\n+\n+ Furthermore, the outputs are scaled by a factor of *1/(1-p)* during\n+ training. This means that during evaluation the module simply computes an\n+ identity function.\n+\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n inplace: If set to True, will do this operation in-place. Default: false\n@@ -19,6 +29,8 @@\n >>> m = nn.Dropout(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16))\n >>> output = m(input)\n+\n+ .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580\n \"\"\"\n \n def __init__(self, p=0.5, inplace=False):\n", "issue": "Dropout documentation is incorrect\n\"Randomly zeroes some of the elements of the input tensor. The elements to zero are randomized on every forward call.\"\r\n\r\nThis is incorrect; the function also scales up by 1/(1-p), which the implementation correctly does.\n", "before_files": [{"content": "from .module import Module\nfrom .. import functional as F\n\n\nclass Dropout(Module):\n r\"\"\"Randomly zeroes some of the elements of the input tensor.\n The elements to zero are randomized on every forward call.\n\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n inplace: If set to True, will do this operation in-place. Default: false\n\n Shape:\n - Input: `Any`. Input can be of any shape\n - Output: `Same`. 
Output is of the same shape as input\n\n Examples::\n\n >>> m = nn.Dropout(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16))\n >>> output = m(input)\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return F.dropout(input, self.p, self.training, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p = ' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout2d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero-out are randomized on every forward call.\n\n *Usually the input comes from Conv2d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout2d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout2d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))\n >>> output = m(input)\n\n .. _Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout2d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout2d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout3d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero are randomized on every forward call.\n\n *Usually the input comes from Conv3d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout3d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, D, H, W)`\n - Output: :math:`(N, C, D, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout3d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32))\n >>> output = m(input)\n\n .. 
_Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout3d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout3d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n", "path": "torch/nn/modules/dropout.py"}], "after_files": [{"content": "from .module import Module\nfrom .. import functional as F\n\n\nclass Dropout(Module):\n r\"\"\"During training, randomly zeroes some of the elements of the input\n tensor with probability *p* using samples from a bernoulli distribution.\n The elements to zero are randomized on every forward call.\n\n This has proven to be an effective technique for regularization and\n preventing the co-adaptation of neurons as described in the paper\n `Improving neural networks by preventing co-adaptation of feature\n detectors`_ .\n\n Furthermore, the outputs are scaled by a factor of *1/(1-p)* during\n training. This means that during evaluation the module simply computes an\n identity function.\n\n Args:\n p: probability of an element to be zeroed. Default: 0.5\n inplace: If set to True, will do this operation in-place. Default: false\n\n Shape:\n - Input: `Any`. Input can be of any shape\n - Output: `Same`. Output is of the same shape as input\n\n Examples::\n\n >>> m = nn.Dropout(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16))\n >>> output = m(input)\n\n .. _Improving neural networks by preventing co-adaptation of feature detectors: https://arxiv.org/abs/1207.0580\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return F.dropout(input, self.p, self.training, self.inplace)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p = ' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout2d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero-out are randomized on every forward call.\n\n *Usually the input comes from Conv2d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout2d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, H, W)`\n - Output: :math:`(N, C, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout2d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 32, 32))\n >>> output = m(input)\n\n .. 
_Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout2d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout2d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n\n\nclass Dropout3d(Module):\n r\"\"\"Randomly zeroes whole channels of the input tensor.\n The channels to zero are randomized on every forward call.\n\n *Usually the input comes from Conv3d modules.*\n\n As described in the paper\n `Efficient Object Localization Using Convolutional Networks`_ ,\n if adjacent pixels within feature maps are strongly correlated\n (as is normally the case in early convolution layers) then iid dropout\n will not regularize the activations and will otherwise just result\n in an effective learning rate decrease.\n\n In this case, :func:`nn.Dropout3d` will help promote independence between\n feature maps and should be used instead.\n\n Args:\n p (float, optional): probability of an element to be zeroed.\n inplace (bool, optional): If set to True, will do this operation in-place\n\n Shape:\n - Input: :math:`(N, C, D, H, W)`\n - Output: :math:`(N, C, D, H, W)` (same shape as input)\n\n Examples::\n\n >>> m = nn.Dropout3d(p=0.2)\n >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32))\n >>> output = m(input)\n\n .. _Efficient Object Localization Using Convolutional Networks:\n http://arxiv.org/abs/1411.4280\n \"\"\"\n\n def __init__(self, p=0.5, inplace=False):\n super(Dropout3d, self).__init__()\n if p < 0 or p > 1:\n raise ValueError(\"dropout probability has to be between 0 and 1, \"\n \"but got {}\".format(p))\n self.p = p\n self.inplace = inplace\n\n def forward(self, input):\n return self._backend.Dropout3d(self.p, self.training, self.inplace)(input)\n\n def __repr__(self):\n inplace_str = ', inplace' if self.inplace else ''\n return self.__class__.__name__ + ' (' \\\n + 'p=' + str(self.p) \\\n + inplace_str + ')'\n", "path": "torch/nn/modules/dropout.py"}]} | 1,905 | 366 |
gh_patches_debug_42538 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-300 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Volume change should not be spoken every time
When changing the volume with hardware, it should not speak the result but rather play a sound.
--- END ISSUE ---
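As a rough sketch of the behavior the issue asks for (illustrative only, not the actual fix): the skill could branch on a flag supplied by whatever handles the hardware button, playing a short sound instead of speaking. `play_wav` does exist in `mycroft.util`; the metadata flag and the wav file name below are assumptions.
```python
from os.path import dirname, join
from mycroft.util import play_wav

def on_volume_changed(skill, message, code):
    # 'play_sound' is a hypothetical metadata flag set for hardware presses;
    # 'volume_change.wav' is a placeholder sound file name.
    if message.metadata.get('play_sound', False):
        play_wav(join(dirname(__file__), 'volume_change.wav'))
    else:
        skill.speak_dialog('increase.volume', data={'volume': code})
```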
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/volume/__init__.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import time
20 from alsaaudio import Mixer
21 from os.path import dirname
22
23 from adapt.intent import IntentBuilder
24 from mycroft.skills.core import MycroftSkill
25 from mycroft.util.log import getLogger
26
27 __author__ = 'jdorleans'
28
29 LOGGER = getLogger(__name__)
30
31
32 class VolumeSkill(MycroftSkill):
33 VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80,
34 9: 90, 10: 95, 11: 100}
35
36 def __init__(self):
37 super(VolumeSkill, self).__init__(name="VolumeSkill")
38 self.default_volume = int(self.config.get('default_volume'))
39
40 def initialize(self):
41 self.load_data_files(dirname(__file__))
42 self.__build_set_volume()
43
44 def __build_set_volume(self):
45 intent = IntentBuilder("SetVolumeIntent").require(
46 "VolumeKeyword").require("VolumeAmount").build()
47 self.register_intent(intent, self.handle_set_volume)
48
49 intent = IntentBuilder("IncreaseVolumeIntent").require(
50 "IncreaseVolumeKeyword").build()
51 self.register_intent(intent, self.handle_increase_volume)
52
53 intent = IntentBuilder("DecreaseVolumeIntent").require(
54 "DecreaseVolumeKeyword").build()
55 self.register_intent(intent, self.handle_decrease_volume)
56
57 intent = IntentBuilder("MuteVolumeIntent").require(
58 "MuteVolumeKeyword").build()
59 self.register_intent(intent, self.handle_mute_volume)
60
61 intent = IntentBuilder("ResetVolumeIntent").require(
62 "ResetVolumeKeyword").build()
63 self.register_intent(intent, self.handle_reset_volume)
64
65 def handle_set_volume(self, message):
66 mixer = Mixer()
67 code, volume = self.get_volume(message, mixer.getvolume()[0])
68 mixer.setvolume(volume)
69 self.speak_dialog('set.volume', data={'volume': code})
70
71 def handle_increase_volume(self, message):
72 code, volume = self.__update_volume(1)
73 self.speak_dialog('increase.volume', data={'volume': code})
74
75 def handle_decrease_volume(self, message):
76 code, volume = self.__update_volume(-1)
77 self.speak_dialog('decrease.volume', data={'volume': code})
78
79 def handle_mute_volume(self, message):
80 self.speak_dialog('mute.volume')
81 time.sleep(2)
82 Mixer().setvolume(0)
83
84 def handle_reset_volume(self, message):
85 Mixer().setvolume(self.default_volume)
86 self.speak_dialog(
87 'reset.volume',
88 data={'volume': self.get_volume_code(self.default_volume)})
89
90 def __update_volume(self, level=0):
91 mixer = Mixer()
92 volume = mixer.getvolume()[0]
93 code = self.get_volume_code(volume) + level
94 code = self.fix_code(code)
95 if code in self.VOLUMES:
96 volume = self.VOLUMES[code]
97 mixer.setvolume(volume)
98 return code, volume
99
100 def get_volume(self, message, default=None):
101 amount = message.metadata.get('VolumeAmount', default)
102 if not amount:
103 return self.default_volume
104
105 if amount in ['loud']:
106 amount = 9
107 elif amount in ['normal']:
108 amount = 6
109 elif amount in ['quiet']:
110 amount = 3
111 elif amount in ['two']:
112 amount = 2
113 elif amount in ['one']:
114 amount = 1
115 elif amount in ['zero']:
116 amount = 0
117 else:
118 try:
119 amount = int(amount)
120 except:
121 return self.default_volume
122
123 amount = self.fix_code(amount)
124 return amount, self.VOLUMES[amount]
125
126 def get_volume_code(self, volume):
127 for k, v in self.VOLUMES.iteritems():
128 if volume <= v:
129 return k
130
131 @staticmethod
132 def fix_code(code):
133 if code > 11:
134 code = 11
135 elif code < 0:
136 code = 0
137 return code
138
139 def stop(self):
140 pass
141
142
143 def create_skill():
144 return VolumeSkill()
145
```
Path: `mycroft/client/enclosure/enclosure.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import sys
20 from Queue import Queue
21 from threading import Thread
22
23 import serial
24
25 from mycroft.client.enclosure.arduino import EnclosureArduino
26 from mycroft.client.enclosure.eyes import EnclosureEyes
27 from mycroft.client.enclosure.mouth import EnclosureMouth
28 from mycroft.client.enclosure.weather import EnclosureWeather
29 from mycroft.configuration import ConfigurationManager
30 from mycroft.messagebus.client.ws import WebsocketClient
31 from mycroft.messagebus.message import Message
32 from mycroft.util import kill
33 from mycroft.util.log import getLogger
34
35 __author__ = 'aatchison + jdorleans + iward'
36
37 LOGGER = getLogger("EnclosureClient")
38
39
40 class EnclosureReader(Thread):
41 """
42 Reads data from Serial port.
43
44 Listens to all commands sent by Arduino that must be be performed on
45 Mycroft Core.
46
47 E.g. Mycroft Stop Feature
48 #. Arduino sends a Stop command after a button press on a Mycroft unit
49 #. ``EnclosureReader`` captures the Stop command
50 #. Notify all Mycroft Core processes (e.g. skills) to be stopped
51
52 Note: A command is identified by a line break
53 """
54
55 def __init__(self, serial, client):
56 super(EnclosureReader, self).__init__(target=self.read)
57 self.alive = True
58 self.daemon = True
59 self.serial = serial
60 self.client = client
61 self.start()
62
63 def read(self):
64 while self.alive:
65 try:
66 data = self.serial.readline()[:-2]
67 if data:
68 self.process(data)
69 LOGGER.info("Reading: " + data)
70 except Exception as e:
71 LOGGER.error("Reading error: {0}".format(e))
72
73 def process(self, data):
74 self.client.emit(Message(data))
75
76 if "mycroft.stop" in data:
77 self.client.emit(Message("mycroft.stop"))
78 kill(['mimic']) # TODO - Refactoring in favor of Mycroft Stop
79
80 if "volume.up" in data:
81 self.client.emit(Message("IncreaseVolumeIntent"))
82
83 if "volume.down" in data:
84 self.client.emit(Message("DecreaseVolumeIntent"))
85
86 def stop(self):
87 self.alive = False
88 self.join()
89
90
91 class EnclosureWriter(Thread):
92 """
93 Writes data to Serial port.
94 #. Enqueues all commands received from Mycroft enclosures
95 implementation
96 #. Process them on the received order by writing on the Serial port
97
98 E.g. Displaying a text on Mycroft's Mouth
99 #. ``EnclosureMouth`` sends a text command
100 #. ``EnclosureWriter`` captures and enqueue the command
101 #. ``EnclosureWriter`` removes the next command from the queue
102 #. ``EnclosureWriter`` writes the command to Serial port
103
104 Note: A command has to end with a line break
105 """
106
107 def __init__(self, serial, client, size=16):
108 super(EnclosureWriter, self).__init__(target=self.flush)
109 self.alive = True
110 self.daemon = True
111 self.serial = serial
112 self.client = client
113 self.commands = Queue(size)
114 self.start()
115
116 def flush(self):
117 while self.alive:
118 try:
119 cmd = self.commands.get()
120 self.serial.write(cmd + '\n')
121 LOGGER.info("Writing: " + cmd)
122 self.commands.task_done()
123 except Exception as e:
124 LOGGER.error("Writing error: {0}".format(e))
125
126 def write(self, command):
127 self.commands.put(str(command))
128
129 def stop(self):
130 self.alive = False
131 self.join()
132
133
134 class Enclosure:
135 """
136 Serves as a communication interface between Arduino and Mycroft Core.
137
138 ``Enclosure`` initializes and aggregates all enclosures implementation.
139
140 E.g. ``EnclosureEyes``, ``EnclosureMouth`` and ``EnclosureArduino``
141
142 It also listens to the basis events in order to perform those core actions
143 on the unit.
144
145 E.g. Start and Stop talk animation
146 """
147
148 def __init__(self):
149 self.__init_serial()
150 self.client = WebsocketClient()
151 self.reader = EnclosureReader(self.serial, self.client)
152 self.writer = EnclosureWriter(self.serial, self.client)
153 self.eyes = EnclosureEyes(self.client, self.writer)
154 self.mouth = EnclosureMouth(self.client, self.writer)
155 self.system = EnclosureArduino(self.client, self.writer)
156 self.weather = EnclosureWeather(self.client, self.writer)
157 self.__register_events()
158
159 def __init_serial(self):
160 try:
161 self.config = ConfigurationManager.get().get("enclosure")
162 self.port = self.config.get("port")
163 self.rate = int(self.config.get("rate"))
164 self.timeout = int(self.config.get("timeout"))
165 self.serial = serial.serial_for_url(
166 url=self.port, baudrate=self.rate, timeout=self.timeout)
167 LOGGER.info(
168 "Connected to: " + self.port + " rate: " + str(self.rate) +
169 " timeout: " + str(self.timeout))
170 except:
171 LOGGER.error(
172 "It is not possible to connect to serial port: " + self.port)
173 raise
174
175 def __register_events(self):
176 self.client.on('mycroft.paired', self.__update_events)
177 self.client.on('enclosure.mouth.listeners', self.__mouth_listeners)
178 self.__register_mouth_events()
179
180 def __mouth_listeners(self, event=None):
181 if event and event.metadata:
182 active = event.metadata['active']
183 if active:
184 self.__register_mouth_events()
185 else:
186 self.__remove_mouth_events()
187
188 def __register_mouth_events(self):
189 self.client.on('recognizer_loop:record_begin', self.mouth.listen)
190 self.client.on('recognizer_loop:record_end', self.mouth.reset)
191 self.client.on('recognizer_loop:audio_output_start', self.mouth.talk)
192 self.client.on('recognizer_loop:audio_output_end', self.mouth.reset)
193
194 def __remove_mouth_events(self):
195 self.client.remove('recognizer_loop:record_begin', self.mouth.listen)
196 self.client.remove('recognizer_loop:record_end', self.mouth.reset)
197 self.client.remove('recognizer_loop:audio_output_start',
198 self.mouth.talk)
199 self.client.remove('recognizer_loop:audio_output_end',
200 self.mouth.reset)
201
202 def __update_events(self, event=None):
203 if event and event.metadata:
204 if event.metadata.get('paired', False):
205 self.__register_mouth_events()
206 else:
207 self.__remove_mouth_events()
208
209 def run(self):
210 try:
211 self.client.run_forever()
212 except Exception as e:
213 LOGGER.error("Client error: {0}".format(e))
214 self.stop()
215
216 def stop(self):
217 self.writer.stop()
218 self.reader.stop()
219 self.serial.close()
220
221
222 def main():
223 try:
224 Enclosure().run()
225 finally:
226 sys.exit()
227
228
229 if __name__ == "__main__":
230 main()
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/client/enclosure/enclosure.py b/mycroft/client/enclosure/enclosure.py
--- a/mycroft/client/enclosure/enclosure.py
+++ b/mycroft/client/enclosure/enclosure.py
@@ -78,10 +78,12 @@
kill(['mimic']) # TODO - Refactoring in favor of Mycroft Stop
if "volume.up" in data:
- self.client.emit(Message("IncreaseVolumeIntent"))
+ self.client.emit(
+ Message("IncreaseVolumeIntent", metadata={'play_sound': True}))
if "volume.down" in data:
- self.client.emit(Message("DecreaseVolumeIntent"))
+ self.client.emit(
+ Message("DecreaseVolumeIntent", metadata={'play_sound': True}))
def stop(self):
self.alive = False
diff --git a/mycroft/skills/volume/__init__.py b/mycroft/skills/volume/__init__.py
--- a/mycroft/skills/volume/__init__.py
+++ b/mycroft/skills/volume/__init__.py
@@ -18,10 +18,13 @@
import time
from alsaaudio import Mixer
-from os.path import dirname
+from os.path import dirname, join
from adapt.intent import IntentBuilder
+
+from mycroft.client.enclosure import enclosure
from mycroft.skills.core import MycroftSkill
+from mycroft.util import play_wav
from mycroft.util.log import getLogger
__author__ = 'jdorleans'
@@ -36,6 +39,7 @@
def __init__(self):
super(VolumeSkill, self).__init__(name="VolumeSkill")
self.default_volume = int(self.config.get('default_volume'))
+ self.volume_sound = join(dirname(__file__), "blop-mark-diangelo.wav")
def initialize(self):
self.load_data_files(dirname(__file__))
@@ -68,13 +72,23 @@
mixer.setvolume(volume)
self.speak_dialog('set.volume', data={'volume': code})
+ def communicate_volume_change(self, message, dialog, code, changed):
+ play_sound = message.metadata.get('play_sound', False)
+ if play_sound:
+ if changed:
+ play_wav(self.volume_sound)
+ else:
+ if not changed:
+ dialog = 'already.max.volume'
+ self.speak_dialog(dialog, data={'volume': code})
+
def handle_increase_volume(self, message):
- code, volume = self.__update_volume(1)
- self.speak_dialog('increase.volume', data={'volume': code})
+ self.communicate_volume_change(message, 'increase.volume',
+ *self.__update_volume(+1))
def handle_decrease_volume(self, message):
- code, volume = self.__update_volume(-1)
- self.speak_dialog('decrease.volume', data={'volume': code})
+ self.communicate_volume_change(message, 'decrease.volume',
+ *self.__update_volume(-1))
def handle_mute_volume(self, message):
self.speak_dialog('mute.volume')
@@ -88,14 +102,20 @@
data={'volume': self.get_volume_code(self.default_volume)})
def __update_volume(self, level=0):
+ """
+ Tries to change volume level
+ :param level: +1 or -1; the step to change by
+ :return: new code (0..11), whether volume changed
+ """
mixer = Mixer()
volume = mixer.getvolume()[0]
- code = self.get_volume_code(volume) + level
- code = self.fix_code(code)
- if code in self.VOLUMES:
- volume = self.VOLUMES[code]
+ old_code = self.get_volume_code(volume)
+
+ new_code = self.fix_code(old_code + level)
+ if new_code in self.VOLUMES:
+ volume = self.VOLUMES[new_code]
mixer.setvolume(volume)
- return code, volume
+ return new_code, new_code != old_code
def get_volume(self, message, default=None):
amount = message.metadata.get('VolumeAmount', default)
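To make the new `(new_code, changed)` return value concrete, here is a small standalone sketch of the same logic with the ALSA mixer factored out. It is illustrative only; the table and clamping mirror `VOLUMES` and `fix_code` from the skill, but this is not the patched module itself.
```python
VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80,
           9: 90, 10: 95, 11: 100}

def fix_code(code):
    # clamp to the valid 0..11 range, as in the skill
    return max(0, min(11, code))

def update_volume(current_percent, level):
    # smallest code whose mapped volume is at least the current mixer percentage
    old_code = next(k for k in sorted(VOLUMES) if current_percent <= VOLUMES[k])
    new_code = fix_code(old_code + level)
    return new_code, new_code != old_code

print(update_volume(95, +1))   # (11, True)  -> changed: play the blip
print(update_volume(100, +1))  # (11, False) -> unchanged: "already at max volume"
```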
| {"golden_diff": "diff --git a/mycroft/client/enclosure/enclosure.py b/mycroft/client/enclosure/enclosure.py\n--- a/mycroft/client/enclosure/enclosure.py\n+++ b/mycroft/client/enclosure/enclosure.py\n@@ -78,10 +78,12 @@\n kill(['mimic']) # TODO - Refactoring in favor of Mycroft Stop\n \n if \"volume.up\" in data:\n- self.client.emit(Message(\"IncreaseVolumeIntent\"))\n+ self.client.emit(\n+ Message(\"IncreaseVolumeIntent\", metadata={'play_sound': True}))\n \n if \"volume.down\" in data:\n- self.client.emit(Message(\"DecreaseVolumeIntent\"))\n+ self.client.emit(\n+ Message(\"DecreaseVolumeIntent\", metadata={'play_sound': True}))\n \n def stop(self):\n self.alive = False\ndiff --git a/mycroft/skills/volume/__init__.py b/mycroft/skills/volume/__init__.py\n--- a/mycroft/skills/volume/__init__.py\n+++ b/mycroft/skills/volume/__init__.py\n@@ -18,10 +18,13 @@\n \n import time\n from alsaaudio import Mixer\n-from os.path import dirname\n+from os.path import dirname, join\n \n from adapt.intent import IntentBuilder\n+\n+from mycroft.client.enclosure import enclosure\n from mycroft.skills.core import MycroftSkill\n+from mycroft.util import play_wav\n from mycroft.util.log import getLogger\n \n __author__ = 'jdorleans'\n@@ -36,6 +39,7 @@\n def __init__(self):\n super(VolumeSkill, self).__init__(name=\"VolumeSkill\")\n self.default_volume = int(self.config.get('default_volume'))\n+ self.volume_sound = join(dirname(__file__), \"blop-mark-diangelo.wav\")\n \n def initialize(self):\n self.load_data_files(dirname(__file__))\n@@ -68,13 +72,23 @@\n mixer.setvolume(volume)\n self.speak_dialog('set.volume', data={'volume': code})\n \n+ def communicate_volume_change(self, message, dialog, code, changed):\n+ play_sound = message.metadata.get('play_sound', False)\n+ if play_sound:\n+ if changed:\n+ play_wav(self.volume_sound)\n+ else:\n+ if not changed:\n+ dialog = 'already.max.volume'\n+ self.speak_dialog(dialog, data={'volume': code})\n+\n def handle_increase_volume(self, message):\n- code, volume = self.__update_volume(1)\n- self.speak_dialog('increase.volume', data={'volume': code})\n+ self.communicate_volume_change(message, 'increase.volume',\n+ *self.__update_volume(+1))\n \n def handle_decrease_volume(self, message):\n- code, volume = self.__update_volume(-1)\n- self.speak_dialog('decrease.volume', data={'volume': code})\n+ self.communicate_volume_change(message, 'decrease.volume',\n+ *self.__update_volume(-1))\n \n def handle_mute_volume(self, message):\n self.speak_dialog('mute.volume')\n@@ -88,14 +102,20 @@\n data={'volume': self.get_volume_code(self.default_volume)})\n \n def __update_volume(self, level=0):\n+ \"\"\"\n+ Tries to change volume level\n+ :param level: +1 or -1; the step to change by\n+ :return: new code (0..11), whether volume changed\n+ \"\"\"\n mixer = Mixer()\n volume = mixer.getvolume()[0]\n- code = self.get_volume_code(volume) + level\n- code = self.fix_code(code)\n- if code in self.VOLUMES:\n- volume = self.VOLUMES[code]\n+ old_code = self.get_volume_code(volume)\n+\n+ new_code = self.fix_code(old_code + level)\n+ if new_code in self.VOLUMES:\n+ volume = self.VOLUMES[new_code]\n mixer.setvolume(volume)\n- return code, volume\n+ return new_code, new_code != old_code\n \n def get_volume(self, message, default=None):\n amount = message.metadata.get('VolumeAmount', default)\n", "issue": "Volume change should not be spoken every time\nWhen changing the volume with hardware it should not speak the result but rather play a sound.\n\n", "before_files": [{"content": "# Copyright 2016 
Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\nfrom alsaaudio import Mixer\nfrom os.path import dirname\n\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'jdorleans'\n\nLOGGER = getLogger(__name__)\n\n\nclass VolumeSkill(MycroftSkill):\n VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80,\n 9: 90, 10: 95, 11: 100}\n\n def __init__(self):\n super(VolumeSkill, self).__init__(name=\"VolumeSkill\")\n self.default_volume = int(self.config.get('default_volume'))\n\n def initialize(self):\n self.load_data_files(dirname(__file__))\n self.__build_set_volume()\n\n def __build_set_volume(self):\n intent = IntentBuilder(\"SetVolumeIntent\").require(\n \"VolumeKeyword\").require(\"VolumeAmount\").build()\n self.register_intent(intent, self.handle_set_volume)\n\n intent = IntentBuilder(\"IncreaseVolumeIntent\").require(\n \"IncreaseVolumeKeyword\").build()\n self.register_intent(intent, self.handle_increase_volume)\n\n intent = IntentBuilder(\"DecreaseVolumeIntent\").require(\n \"DecreaseVolumeKeyword\").build()\n self.register_intent(intent, self.handle_decrease_volume)\n\n intent = IntentBuilder(\"MuteVolumeIntent\").require(\n \"MuteVolumeKeyword\").build()\n self.register_intent(intent, self.handle_mute_volume)\n\n intent = IntentBuilder(\"ResetVolumeIntent\").require(\n \"ResetVolumeKeyword\").build()\n self.register_intent(intent, self.handle_reset_volume)\n\n def handle_set_volume(self, message):\n mixer = Mixer()\n code, volume = self.get_volume(message, mixer.getvolume()[0])\n mixer.setvolume(volume)\n self.speak_dialog('set.volume', data={'volume': code})\n\n def handle_increase_volume(self, message):\n code, volume = self.__update_volume(1)\n self.speak_dialog('increase.volume', data={'volume': code})\n\n def handle_decrease_volume(self, message):\n code, volume = self.__update_volume(-1)\n self.speak_dialog('decrease.volume', data={'volume': code})\n\n def handle_mute_volume(self, message):\n self.speak_dialog('mute.volume')\n time.sleep(2)\n Mixer().setvolume(0)\n\n def handle_reset_volume(self, message):\n Mixer().setvolume(self.default_volume)\n self.speak_dialog(\n 'reset.volume',\n data={'volume': self.get_volume_code(self.default_volume)})\n\n def __update_volume(self, level=0):\n mixer = Mixer()\n volume = mixer.getvolume()[0]\n code = self.get_volume_code(volume) + level\n code = self.fix_code(code)\n if code in self.VOLUMES:\n volume = self.VOLUMES[code]\n mixer.setvolume(volume)\n return code, volume\n\n def get_volume(self, message, default=None):\n amount = message.metadata.get('VolumeAmount', default)\n if not amount:\n return self.default_volume\n\n if amount in ['loud']:\n amount = 9\n elif amount in ['normal']:\n amount = 6\n elif amount in ['quiet']:\n amount = 3\n elif amount in ['two']:\n amount = 
2\n elif amount in ['one']:\n amount = 1\n elif amount in ['zero']:\n amount = 0\n else:\n try:\n amount = int(amount)\n except:\n return self.default_volume\n\n amount = self.fix_code(amount)\n return amount, self.VOLUMES[amount]\n\n def get_volume_code(self, volume):\n for k, v in self.VOLUMES.iteritems():\n if volume <= v:\n return k\n\n @staticmethod\n def fix_code(code):\n if code > 11:\n code = 11\n elif code < 0:\n code = 0\n return code\n\n def stop(self):\n pass\n\n\ndef create_skill():\n return VolumeSkill()\n", "path": "mycroft/skills/volume/__init__.py"}, {"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport sys\nfrom Queue import Queue\nfrom threading import Thread\n\nimport serial\n\nfrom mycroft.client.enclosure.arduino import EnclosureArduino\nfrom mycroft.client.enclosure.eyes import EnclosureEyes\nfrom mycroft.client.enclosure.mouth import EnclosureMouth\nfrom mycroft.client.enclosure.weather import EnclosureWeather\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util import kill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'aatchison + jdorleans + iward'\n\nLOGGER = getLogger(\"EnclosureClient\")\n\n\nclass EnclosureReader(Thread):\n \"\"\"\n Reads data from Serial port.\n\n Listens to all commands sent by Arduino that must be be performed on\n Mycroft Core.\n\n E.g. Mycroft Stop Feature\n #. Arduino sends a Stop command after a button press on a Mycroft unit\n #. ``EnclosureReader`` captures the Stop command\n #. Notify all Mycroft Core processes (e.g. skills) to be stopped\n\n Note: A command is identified by a line break\n \"\"\"\n\n def __init__(self, serial, client):\n super(EnclosureReader, self).__init__(target=self.read)\n self.alive = True\n self.daemon = True\n self.serial = serial\n self.client = client\n self.start()\n\n def read(self):\n while self.alive:\n try:\n data = self.serial.readline()[:-2]\n if data:\n self.process(data)\n LOGGER.info(\"Reading: \" + data)\n except Exception as e:\n LOGGER.error(\"Reading error: {0}\".format(e))\n\n def process(self, data):\n self.client.emit(Message(data))\n\n if \"mycroft.stop\" in data:\n self.client.emit(Message(\"mycroft.stop\"))\n kill(['mimic']) # TODO - Refactoring in favor of Mycroft Stop\n\n if \"volume.up\" in data:\n self.client.emit(Message(\"IncreaseVolumeIntent\"))\n\n if \"volume.down\" in data:\n self.client.emit(Message(\"DecreaseVolumeIntent\"))\n\n def stop(self):\n self.alive = False\n self.join()\n\n\nclass EnclosureWriter(Thread):\n \"\"\"\n Writes data to Serial port.\n #. Enqueues all commands received from Mycroft enclosures\n implementation\n #. Process them on the received order by writing on the Serial port\n\n E.g. Displaying a text on Mycroft's Mouth\n #. 
``EnclosureMouth`` sends a text command\n #. ``EnclosureWriter`` captures and enqueue the command\n #. ``EnclosureWriter`` removes the next command from the queue\n #. ``EnclosureWriter`` writes the command to Serial port\n\n Note: A command has to end with a line break\n \"\"\"\n\n def __init__(self, serial, client, size=16):\n super(EnclosureWriter, self).__init__(target=self.flush)\n self.alive = True\n self.daemon = True\n self.serial = serial\n self.client = client\n self.commands = Queue(size)\n self.start()\n\n def flush(self):\n while self.alive:\n try:\n cmd = self.commands.get()\n self.serial.write(cmd + '\\n')\n LOGGER.info(\"Writing: \" + cmd)\n self.commands.task_done()\n except Exception as e:\n LOGGER.error(\"Writing error: {0}\".format(e))\n\n def write(self, command):\n self.commands.put(str(command))\n\n def stop(self):\n self.alive = False\n self.join()\n\n\nclass Enclosure:\n \"\"\"\n Serves as a communication interface between Arduino and Mycroft Core.\n\n ``Enclosure`` initializes and aggregates all enclosures implementation.\n\n E.g. ``EnclosureEyes``, ``EnclosureMouth`` and ``EnclosureArduino``\n\n It also listens to the basis events in order to perform those core actions\n on the unit.\n\n E.g. Start and Stop talk animation\n \"\"\"\n\n def __init__(self):\n self.__init_serial()\n self.client = WebsocketClient()\n self.reader = EnclosureReader(self.serial, self.client)\n self.writer = EnclosureWriter(self.serial, self.client)\n self.eyes = EnclosureEyes(self.client, self.writer)\n self.mouth = EnclosureMouth(self.client, self.writer)\n self.system = EnclosureArduino(self.client, self.writer)\n self.weather = EnclosureWeather(self.client, self.writer)\n self.__register_events()\n\n def __init_serial(self):\n try:\n self.config = ConfigurationManager.get().get(\"enclosure\")\n self.port = self.config.get(\"port\")\n self.rate = int(self.config.get(\"rate\"))\n self.timeout = int(self.config.get(\"timeout\"))\n self.serial = serial.serial_for_url(\n url=self.port, baudrate=self.rate, timeout=self.timeout)\n LOGGER.info(\n \"Connected to: \" + self.port + \" rate: \" + str(self.rate) +\n \" timeout: \" + str(self.timeout))\n except:\n LOGGER.error(\n \"It is not possible to connect to serial port: \" + self.port)\n raise\n\n def __register_events(self):\n self.client.on('mycroft.paired', self.__update_events)\n self.client.on('enclosure.mouth.listeners', self.__mouth_listeners)\n self.__register_mouth_events()\n\n def __mouth_listeners(self, event=None):\n if event and event.metadata:\n active = event.metadata['active']\n if active:\n self.__register_mouth_events()\n else:\n self.__remove_mouth_events()\n\n def __register_mouth_events(self):\n self.client.on('recognizer_loop:record_begin', self.mouth.listen)\n self.client.on('recognizer_loop:record_end', self.mouth.reset)\n self.client.on('recognizer_loop:audio_output_start', self.mouth.talk)\n self.client.on('recognizer_loop:audio_output_end', self.mouth.reset)\n\n def __remove_mouth_events(self):\n self.client.remove('recognizer_loop:record_begin', self.mouth.listen)\n self.client.remove('recognizer_loop:record_end', self.mouth.reset)\n self.client.remove('recognizer_loop:audio_output_start',\n self.mouth.talk)\n self.client.remove('recognizer_loop:audio_output_end',\n self.mouth.reset)\n\n def __update_events(self, event=None):\n if event and event.metadata:\n if event.metadata.get('paired', False):\n self.__register_mouth_events()\n else:\n self.__remove_mouth_events()\n\n def run(self):\n try:\n 
self.client.run_forever()\n except Exception as e:\n LOGGER.error(\"Client error: {0}\".format(e))\n self.stop()\n\n def stop(self):\n self.writer.stop()\n self.reader.stop()\n self.serial.close()\n\n\ndef main():\n try:\n Enclosure().run()\n finally:\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/client/enclosure/enclosure.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport time\nfrom alsaaudio import Mixer\nfrom os.path import dirname, join\n\nfrom adapt.intent import IntentBuilder\n\nfrom mycroft.client.enclosure import enclosure\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util import play_wav\nfrom mycroft.util.log import getLogger\n\n__author__ = 'jdorleans'\n\nLOGGER = getLogger(__name__)\n\n\nclass VolumeSkill(MycroftSkill):\n VOLUMES = {0: 0, 1: 15, 2: 25, 3: 35, 4: 45, 5: 55, 6: 65, 7: 70, 8: 80,\n 9: 90, 10: 95, 11: 100}\n\n def __init__(self):\n super(VolumeSkill, self).__init__(name=\"VolumeSkill\")\n self.default_volume = int(self.config.get('default_volume'))\n self.volume_sound = join(dirname(__file__), \"blop-mark-diangelo.wav\")\n\n def initialize(self):\n self.load_data_files(dirname(__file__))\n self.__build_set_volume()\n\n def __build_set_volume(self):\n intent = IntentBuilder(\"SetVolumeIntent\").require(\n \"VolumeKeyword\").require(\"VolumeAmount\").build()\n self.register_intent(intent, self.handle_set_volume)\n\n intent = IntentBuilder(\"IncreaseVolumeIntent\").require(\n \"IncreaseVolumeKeyword\").build()\n self.register_intent(intent, self.handle_increase_volume)\n\n intent = IntentBuilder(\"DecreaseVolumeIntent\").require(\n \"DecreaseVolumeKeyword\").build()\n self.register_intent(intent, self.handle_decrease_volume)\n\n intent = IntentBuilder(\"MuteVolumeIntent\").require(\n \"MuteVolumeKeyword\").build()\n self.register_intent(intent, self.handle_mute_volume)\n\n intent = IntentBuilder(\"ResetVolumeIntent\").require(\n \"ResetVolumeKeyword\").build()\n self.register_intent(intent, self.handle_reset_volume)\n\n def handle_set_volume(self, message):\n mixer = Mixer()\n code, volume = self.get_volume(message, mixer.getvolume()[0])\n mixer.setvolume(volume)\n self.speak_dialog('set.volume', data={'volume': code})\n\n def communicate_volume_change(self, message, dialog, code, changed):\n play_sound = message.metadata.get('play_sound', False)\n if play_sound:\n if changed:\n play_wav(self.volume_sound)\n else:\n if not changed:\n dialog = 'already.max.volume'\n self.speak_dialog(dialog, data={'volume': code})\n\n def handle_increase_volume(self, message):\n self.communicate_volume_change(message, 'increase.volume',\n *self.__update_volume(+1))\n\n def handle_decrease_volume(self, message):\n self.communicate_volume_change(message, 'decrease.volume',\n *self.__update_volume(-1))\n\n def handle_mute_volume(self, 
message):\n self.speak_dialog('mute.volume')\n time.sleep(2)\n Mixer().setvolume(0)\n\n def handle_reset_volume(self, message):\n Mixer().setvolume(self.default_volume)\n self.speak_dialog(\n 'reset.volume',\n data={'volume': self.get_volume_code(self.default_volume)})\n\n def __update_volume(self, level=0):\n \"\"\"\n Tries to change volume level\n :param level: +1 or -1; the step to change by\n :return: new code (0..11), whether volume changed\n \"\"\"\n mixer = Mixer()\n volume = mixer.getvolume()[0]\n old_code = self.get_volume_code(volume)\n\n new_code = self.fix_code(old_code + level)\n if new_code in self.VOLUMES:\n volume = self.VOLUMES[new_code]\n mixer.setvolume(volume)\n return new_code, new_code != old_code\n\n def get_volume(self, message, default=None):\n amount = message.metadata.get('VolumeAmount', default)\n if not amount:\n return self.default_volume\n\n if amount in ['loud']:\n amount = 9\n elif amount in ['normal']:\n amount = 6\n elif amount in ['quiet']:\n amount = 3\n elif amount in ['two']:\n amount = 2\n elif amount in ['one']:\n amount = 1\n elif amount in ['zero']:\n amount = 0\n else:\n try:\n amount = int(amount)\n except:\n return self.default_volume\n\n amount = self.fix_code(amount)\n return amount, self.VOLUMES[amount]\n\n def get_volume_code(self, volume):\n for k, v in self.VOLUMES.iteritems():\n if volume <= v:\n return k\n\n @staticmethod\n def fix_code(code):\n if code > 11:\n code = 11\n elif code < 0:\n code = 0\n return code\n\n def stop(self):\n pass\n\n\ndef create_skill():\n return VolumeSkill()\n", "path": "mycroft/skills/volume/__init__.py"}, {"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport sys\nfrom Queue import Queue\nfrom threading import Thread\n\nimport serial\n\nfrom mycroft.client.enclosure.arduino import EnclosureArduino\nfrom mycroft.client.enclosure.eyes import EnclosureEyes\nfrom mycroft.client.enclosure.mouth import EnclosureMouth\nfrom mycroft.client.enclosure.weather import EnclosureWeather\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util import kill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'aatchison + jdorleans + iward'\n\nLOGGER = getLogger(\"EnclosureClient\")\n\n\nclass EnclosureReader(Thread):\n \"\"\"\n Reads data from Serial port.\n\n Listens to all commands sent by Arduino that must be be performed on\n Mycroft Core.\n\n E.g. Mycroft Stop Feature\n #. Arduino sends a Stop command after a button press on a Mycroft unit\n #. ``EnclosureReader`` captures the Stop command\n #. Notify all Mycroft Core processes (e.g. 
skills) to be stopped\n\n Note: A command is identified by a line break\n \"\"\"\n\n def __init__(self, serial, client):\n super(EnclosureReader, self).__init__(target=self.read)\n self.alive = True\n self.daemon = True\n self.serial = serial\n self.client = client\n self.start()\n\n def read(self):\n while self.alive:\n try:\n data = self.serial.readline()[:-2]\n if data:\n self.process(data)\n LOGGER.info(\"Reading: \" + data)\n except Exception as e:\n LOGGER.error(\"Reading error: {0}\".format(e))\n\n def process(self, data):\n self.client.emit(Message(data))\n\n if \"mycroft.stop\" in data:\n self.client.emit(Message(\"mycroft.stop\"))\n kill(['mimic']) # TODO - Refactoring in favor of Mycroft Stop\n\n if \"volume.up\" in data:\n self.client.emit(\n Message(\"IncreaseVolumeIntent\", metadata={'play_sound': True}))\n\n if \"volume.down\" in data:\n self.client.emit(\n Message(\"DecreaseVolumeIntent\", metadata={'play_sound': True}))\n\n def stop(self):\n self.alive = False\n self.join()\n\n\nclass EnclosureWriter(Thread):\n \"\"\"\n Writes data to Serial port.\n #. Enqueues all commands received from Mycroft enclosures\n implementation\n #. Process them on the received order by writing on the Serial port\n\n E.g. Displaying a text on Mycroft's Mouth\n #. ``EnclosureMouth`` sends a text command\n #. ``EnclosureWriter`` captures and enqueue the command\n #. ``EnclosureWriter`` removes the next command from the queue\n #. ``EnclosureWriter`` writes the command to Serial port\n\n Note: A command has to end with a line break\n \"\"\"\n\n def __init__(self, serial, client, size=16):\n super(EnclosureWriter, self).__init__(target=self.flush)\n self.alive = True\n self.daemon = True\n self.serial = serial\n self.client = client\n self.commands = Queue(size)\n self.start()\n\n def flush(self):\n while self.alive:\n try:\n cmd = self.commands.get()\n self.serial.write(cmd + '\\n')\n LOGGER.info(\"Writing: \" + cmd)\n self.commands.task_done()\n except Exception as e:\n LOGGER.error(\"Writing error: {0}\".format(e))\n\n def write(self, command):\n self.commands.put(str(command))\n\n def stop(self):\n self.alive = False\n self.join()\n\n\nclass Enclosure:\n \"\"\"\n Serves as a communication interface between Arduino and Mycroft Core.\n\n ``Enclosure`` initializes and aggregates all enclosures implementation.\n\n E.g. ``EnclosureEyes``, ``EnclosureMouth`` and ``EnclosureArduino``\n\n It also listens to the basis events in order to perform those core actions\n on the unit.\n\n E.g. 
Start and Stop talk animation\n \"\"\"\n\n def __init__(self):\n self.__init_serial()\n self.client = WebsocketClient()\n self.reader = EnclosureReader(self.serial, self.client)\n self.writer = EnclosureWriter(self.serial, self.client)\n self.eyes = EnclosureEyes(self.client, self.writer)\n self.mouth = EnclosureMouth(self.client, self.writer)\n self.system = EnclosureArduino(self.client, self.writer)\n self.weather = EnclosureWeather(self.client, self.writer)\n self.__register_events()\n\n def __init_serial(self):\n try:\n self.config = ConfigurationManager.get().get(\"enclosure\")\n self.port = self.config.get(\"port\")\n self.rate = int(self.config.get(\"rate\"))\n self.timeout = int(self.config.get(\"timeout\"))\n self.serial = serial.serial_for_url(\n url=self.port, baudrate=self.rate, timeout=self.timeout)\n LOGGER.info(\n \"Connected to: \" + self.port + \" rate: \" + str(self.rate) +\n \" timeout: \" + str(self.timeout))\n except:\n LOGGER.error(\n \"It is not possible to connect to serial port: \" + self.port)\n raise\n\n def __register_events(self):\n self.client.on('mycroft.paired', self.__update_events)\n self.client.on('enclosure.mouth.listeners', self.__mouth_listeners)\n self.__register_mouth_events()\n\n def __mouth_listeners(self, event=None):\n if event and event.metadata:\n active = event.metadata['active']\n if active:\n self.__register_mouth_events()\n else:\n self.__remove_mouth_events()\n\n def __register_mouth_events(self):\n self.client.on('recognizer_loop:record_begin', self.mouth.listen)\n self.client.on('recognizer_loop:record_end', self.mouth.reset)\n self.client.on('recognizer_loop:audio_output_start', self.mouth.talk)\n self.client.on('recognizer_loop:audio_output_end', self.mouth.reset)\n\n def __remove_mouth_events(self):\n self.client.remove('recognizer_loop:record_begin', self.mouth.listen)\n self.client.remove('recognizer_loop:record_end', self.mouth.reset)\n self.client.remove('recognizer_loop:audio_output_start',\n self.mouth.talk)\n self.client.remove('recognizer_loop:audio_output_end',\n self.mouth.reset)\n\n def __update_events(self, event=None):\n if event and event.metadata:\n if event.metadata.get('paired', False):\n self.__register_mouth_events()\n else:\n self.__remove_mouth_events()\n\n def run(self):\n try:\n self.client.run_forever()\n except Exception as e:\n LOGGER.error(\"Client error: {0}\".format(e))\n self.stop()\n\n def stop(self):\n self.writer.stop()\n self.reader.stop()\n self.serial.close()\n\n\ndef main():\n try:\n Enclosure().run()\n finally:\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/client/enclosure/enclosure.py"}]} | 4,034 | 919 |
gh_patches_debug_746 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1507 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 81.skills-skilldialog throwing error: [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain
## Sample information
1. Sample type: \samples\
2. Sample language: python
3. Sample name: 81.skills-skilldialog
## Describe the bug
When you run the sample as per the instructions, the skill bot is throwing the following error:
======== Running on http://localhost:39783 ========
(Press CTRL+C to quit)
[on_turn_error] unhandled error: Cannot deserialize content-type: text/plain
Traceback (most recent call last):
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_adapter.py", line 128, in run_pipeline
context, callback
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status
return await self.receive_activity_internal(context, callback)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal
return await callback(context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/bots/skill_bot.py", line 21, in on_turn
self._conversation_state.create_property("DialogState"),
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_extensions.py", line 68, in run_dialog
result = await dialog_context.begin_dialog(dialog.id)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog
return await dialog.begin_dialog(self, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 67, in begin_dialog
turn_result = await self.on_begin_dialog(inner_dc, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 221, in on_begin_dialog
return await inner_dc.begin_dialog(self.initial_dialog_id, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog
return await dialog.begin_dialog(self, options)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 65, in begin_dialog
return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 156, in run_step
return await self.on_step(step_context)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 132, in on_step
return await self._steps[step_context.index](step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 50, in process_activity
return await self._on_event_activity(step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 77, in _on_event_activity
return await self._begin_get_weather(step_context)
File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 156, in _begin_get_weather
get_weather_message, get_weather_message, InputHints.ignoring_input,
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 174, in send_activity
result = await self.send_activities([activity_or_text])
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 226, in send_activities
return await self._emit(self._on_send_activities, output, logic())
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 304, in _emit
return await logic
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 221, in logic
responses = await self.adapter.send_activities(self, output)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 729, in send_activities
raise error
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 715, in send_activities
activity.conversation.id, activity.reply_to_id, activity
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botframework/connector/aio/operations_async/_conversations_operations_async.py", line 529, in reply_to_activity
request, stream=False, **operation_config
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/async_client.py", line 115, in async_send
pipeline_response = await self.config.pipeline.run(request, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 159, in run
return await first_node.send(pipeline_request, **kwargs) # type: ignore
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 79, in send
response = await self.next.send(request, **kwargs) # type: ignore
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_requests.py", line 106, in send
return await self.next.send(request, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 84, in send
self._policy.on_response(request, response, **kwargs)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 252, in on_response
http_response.headers
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 226, in deserialize_from_http_generics
return cls.deserialize_from_text(body_bytes, content_type)
File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 203, in deserialize_from_text
raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
msrest.exceptions.DeserializationError: Cannot deserialize content-type: text/plain
## To Reproduce
Steps to reproduce the behavior:
1. Run the root & skill bots as per the instructions from the sample readme
2. Start the bot framework emulator & connect
3. Choose the DialogSkillBot
4. Enter activity 3
## Expected behavior
No error should be returned.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-integration-aiohttp/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0"
8 REQUIRES = [
9 "botbuilder-schema==4.12.0",
10 "botframework-connector==4.12.0",
11 "botbuilder-core==4.12.0",
12 "aiohttp==3.6.2",
13 ]
14
15 root = os.path.abspath(os.path.dirname(__file__))
16
17 with open(os.path.join(root, "botbuilder", "integration", "aiohttp", "about.py")) as f:
18 package_info = {}
19 info = f.read()
20 exec(info, package_info)
21
22 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
23 long_description = f.read()
24
25 setup(
26 name=package_info["__title__"],
27 version=package_info["__version__"],
28 url=package_info["__uri__"],
29 author=package_info["__author__"],
30 description=package_info["__description__"],
31 keywords=[
32 "BotBuilderIntegrationAiohttp",
33 "bots",
34 "ai",
35 "botframework",
36 "botbuilder",
37 ],
38 long_description=long_description,
39 long_description_content_type="text/x-rst",
40 license=package_info["__license__"],
41 packages=[
42 "botbuilder.integration.aiohttp",
43 "botbuilder.integration.aiohttp.skills",
44 ],
45 install_requires=REQUIRES,
46 classifiers=[
47 "Programming Language :: Python :: 3.7",
48 "Intended Audience :: Developers",
49 "License :: OSI Approved :: MIT License",
50 "Operating System :: OS Independent",
51 "Development Status :: 5 - Production/Stable",
52 "Topic :: Scientific/Engineering :: Artificial Intelligence",
53 ],
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py
--- a/libraries/botbuilder-integration-aiohttp/setup.py
+++ b/libraries/botbuilder-integration-aiohttp/setup.py
@@ -9,6 +9,7 @@
"botbuilder-schema==4.12.0",
"botframework-connector==4.12.0",
"botbuilder-core==4.12.0",
+ "yarl<=1.4.2",
"aiohttp==3.6.2",
]
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-aiohttp/setup.py\n@@ -9,6 +9,7 @@\n \"botbuilder-schema==4.12.0\",\n \"botframework-connector==4.12.0\",\n \"botbuilder-core==4.12.0\",\n+ \"yarl<=1.4.2\",\n \"aiohttp==3.6.2\",\n ]\n", "issue": "Python 81.skills-skilldialog throwing error: [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain\n## Sample information\r\n1. Sample type: \\samples\\\r\n2. Sample language: python\r\n3. Sample name: 81.skills-skilldialog\r\n\r\n## Describe the bug\r\nWhen you run the sample as per the instructions, the skill bot is throwing the following error:\r\n\r\n======== Running on http://localhost:39783 ========\r\n(Press CTRL+C to quit)\r\n\r\n [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain\r\nTraceback (most recent call last):\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_adapter.py\", line 128, in run_pipeline\r\n context, callback\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py\", line 69, in receive_activity_with_status\r\n return await self.receive_activity_internal(context, callback)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py\", line 79, in receive_activity_internal\r\n return await callback(context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/bots/skill_bot.py\", line 21, in on_turn\r\n self._conversation_state.create_property(\"DialogState\"),\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_extensions.py\", line 68, in run_dialog\r\n result = await dialog_context.begin_dialog(dialog.id)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py\", line 91, in begin_dialog\r\n return await dialog.begin_dialog(self, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py\", line 67, in begin_dialog\r\n turn_result = await self.on_begin_dialog(inner_dc, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py\", line 221, in on_begin_dialog\r\n return await inner_dc.begin_dialog(self.initial_dialog_id, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py\", line 91, in begin_dialog\r\n return await dialog.begin_dialog(self, options)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 65, in begin_dialog\r\n return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 156, in run_step\r\n return await self.on_step(step_context)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py\", line 132, in on_step\r\n return await self._steps[step_context.index](step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 50, in 
process_activity\r\n return await self._on_event_activity(step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 77, in _on_event_activity\r\n return await self._begin_get_weather(step_context)\r\n File \"/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py\", line 156, in _begin_get_weather\r\n get_weather_message, get_weather_message, InputHints.ignoring_input,\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 174, in send_activity\r\n result = await self.send_activities([activity_or_text])\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 226, in send_activities\r\n return await self._emit(self._on_send_activities, output, logic())\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 304, in _emit\r\n return await logic\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py\", line 221, in logic\r\n responses = await self.adapter.send_activities(self, output)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py\", line 729, in send_activities\r\n raise error\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py\", line 715, in send_activities\r\n activity.conversation.id, activity.reply_to_id, activity\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botframework/connector/aio/operations_async/_conversations_operations_async.py\", line 529, in reply_to_activity\r\n request, stream=False, **operation_config\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/async_client.py\", line 115, in async_send\r\n pipeline_response = await self.config.pipeline.run(request, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 159, in run\r\n return await first_node.send(pipeline_request, **kwargs) # type: ignore\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 79, in send\r\n response = await self.next.send(request, **kwargs) # type: ignore\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_requests.py\", line 106, in send\r\n return await self.next.send(request, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py\", line 84, in send\r\n self._policy.on_response(request, response, **kwargs)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 252, in on_response\r\n http_response.headers\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 226, in deserialize_from_http_generics\r\n return cls.deserialize_from_text(body_bytes, content_type)\r\n File \"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py\", line 203, in deserialize_from_text\r\n raise DeserializationError(\"Cannot deserialize content-type: {}\".format(content_type))\r\nmsrest.exceptions.DeserializationError: Cannot deserialize content-type: 
text/plain\r\n\r\n\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Run the root & skill bots as per the instructions from the sample readme\r\n2. Start the bot framework emulator & connect\r\n3. Choose the DialogSkillBot\r\n4. Enter activity 3\r\n\r\n## Expected behavior\r\nError not returned\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"botbuilder-schema==4.12.0\",\n \"botframework-connector==4.12.0\",\n \"botbuilder-core==4.12.0\",\n \"aiohttp==3.6.2\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"integration\", \"aiohttp\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderIntegrationAiohttp\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.integration.aiohttp\",\n \"botbuilder.integration.aiohttp.skills\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-aiohttp/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"botbuilder-schema==4.12.0\",\n \"botframework-connector==4.12.0\",\n \"botbuilder-core==4.12.0\",\n \"yarl<=1.4.2\",\n \"aiohttp==3.6.2\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"integration\", \"aiohttp\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderIntegrationAiohttp\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.integration.aiohttp\",\n \"botbuilder.integration.aiohttp.skills\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-aiohttp/setup.py"}]} | 2,625 | 135 |
gh_patches_debug_4999 | rasdani/github-patches | git_diff | encode__starlette-434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Path prefix for PATH_INFO with WSGIMiddleware
When mounting an app (e.g. Django) through `WSGIMiddleware` it seems to be required to also adjust `PATH_INFO` accordingly.
With the following, Django would e.g. redirect to "/admin/" (without the outer "/dj" prefix).
```python
djapp = django.core.wsgi.get_wsgi_application()
app.mount("/dj", app=WSGIMiddleware(djapp))
```
I've got this working through an additional "path_prefix" argument for WSGIMiddleware, but I think it makes sense to either prepend the mount path in `build_environ` always (using `"PATH_INFO": scope["root_path"] + scope["path"]`, and making it visible to the mounted app though), or handle e.g. "Location" response headers by prepending it there.
The latter could likely result in redirect-loops, and does not help with Django POSTing to "/admin/login" then.
I've not investigated too much into this yet, what do you think?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/wsgi.py`
Content:
```
1 import asyncio
2 import io
3 import sys
4 import typing
5
6 from starlette.concurrency import run_in_threadpool
7 from starlette.types import ASGIInstance, Message, Receive, Scope, Send
8
9
10 def build_environ(scope: Scope, body: bytes) -> dict:
11 """
12 Builds a scope and request body into a WSGI environ object.
13 """
14 environ = {
15 "REQUEST_METHOD": scope["method"],
16 "SCRIPT_NAME": "",
17 "PATH_INFO": scope["path"],
18 "QUERY_STRING": scope["query_string"].decode("ascii"),
19 "SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
20 "wsgi.version": (1, 0),
21 "wsgi.url_scheme": scope.get("scheme", "http"),
22 "wsgi.input": io.BytesIO(body),
23 "wsgi.errors": sys.stdout,
24 "wsgi.multithread": True,
25 "wsgi.multiprocess": True,
26 "wsgi.run_once": False,
27 }
28
29 # Get server name and port - required in WSGI, not in ASGI
30 server = scope.get("server") or ("localhost", 80)
31 environ["SERVER_NAME"] = server[0]
32 environ["SERVER_PORT"] = server[1]
33
34 # Get client IP address
35 if scope.get("client"):
36 environ["REMOTE_ADDR"] = scope["client"][0]
37
38 # Go through headers and make them into environ entries
39 for name, value in scope.get("headers", []):
40 name = name.decode("latin1")
41 if name == "content-length":
42 corrected_name = "CONTENT_LENGTH"
43 elif name == "content-type":
44 corrected_name = "CONTENT_TYPE"
45 else:
46 corrected_name = f"HTTP_{name}".upper().replace("-", "_")
47 # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case
48 value = value.decode("latin1")
49 if corrected_name in environ:
50 value = environ[corrected_name] + "," + value
51 environ[corrected_name] = value
52 return environ
53
54
55 class WSGIMiddleware:
56 def __init__(self, app: typing.Callable, workers: int = 10) -> None:
57 self.app = app
58
59 def __call__(self, scope: Scope) -> ASGIInstance:
60 assert scope["type"] == "http"
61 return WSGIResponder(self.app, scope)
62
63
64 class WSGIResponder:
65 def __init__(self, app: typing.Callable, scope: Scope) -> None:
66 self.app = app
67 self.scope = scope
68 self.status = None
69 self.response_headers = None
70 self.send_event = asyncio.Event()
71 self.send_queue = [] # type: typing.List[typing.Optional[Message]]
72 self.loop = asyncio.get_event_loop()
73 self.response_started = False
74 self.exc_info = None # type: typing.Any
75
76 async def __call__(self, receive: Receive, send: Send) -> None:
77 body = b""
78 more_body = True
79 while more_body:
80 message = await receive()
81 body += message.get("body", b"")
82 more_body = message.get("more_body", False)
83 environ = build_environ(self.scope, body)
84 try:
85 sender = self.loop.create_task(self.sender(send))
86 await run_in_threadpool(self.wsgi, environ, self.start_response)
87 self.send_queue.append(None)
88 self.send_event.set()
89 await asyncio.wait_for(sender, None)
90 if self.exc_info is not None:
91 raise self.exc_info[0].with_traceback(
92 self.exc_info[1], self.exc_info[2]
93 )
94 finally:
95 if not sender.done():
96 sender.cancel() # pragma: no cover
97
98 async def sender(self, send: Send) -> None:
99 while True:
100 if self.send_queue:
101 message = self.send_queue.pop(0)
102 if message is None:
103 return
104 await send(message)
105 else:
106 await self.send_event.wait()
107 self.send_event.clear()
108
109 def start_response(
110 self,
111 status: str,
112 response_headers: typing.List[typing.Tuple[str, str]],
113 exc_info: typing.Any = None,
114 ) -> None:
115 self.exc_info = exc_info
116 if not self.response_started:
117 self.response_started = True
118 status_code_string, _ = status.split(" ", 1)
119 status_code = int(status_code_string)
120 headers = [
121 (name.encode("ascii"), value.encode("ascii"))
122 for name, value in response_headers
123 ]
124 self.send_queue.append(
125 {
126 "type": "http.response.start",
127 "status": status_code,
128 "headers": headers,
129 }
130 )
131 self.loop.call_soon_threadsafe(self.send_event.set)
132
133 def wsgi(self, environ: dict, start_response: typing.Callable) -> None:
134 for chunk in self.app(environ, start_response):
135 self.send_queue.append(
136 {"type": "http.response.body", "body": chunk, "more_body": True}
137 )
138 self.loop.call_soon_threadsafe(self.send_event.set)
139
140 self.send_queue.append({"type": "http.response.body", "body": b""})
141 self.loop.call_soon_threadsafe(self.send_event.set)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py
--- a/starlette/middleware/wsgi.py
+++ b/starlette/middleware/wsgi.py
@@ -13,7 +13,7 @@
"""
environ = {
"REQUEST_METHOD": scope["method"],
- "SCRIPT_NAME": "",
+ "SCRIPT_NAME": scope.get("root_path", ""),
"PATH_INFO": scope["path"],
"QUERY_STRING": scope["query_string"].decode("ascii"),
"SERVER_PROTOCOL": f"HTTP/{scope['http_version']}",
| {"golden_diff": "diff --git a/starlette/middleware/wsgi.py b/starlette/middleware/wsgi.py\n--- a/starlette/middleware/wsgi.py\n+++ b/starlette/middleware/wsgi.py\n@@ -13,7 +13,7 @@\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n- \"SCRIPT_NAME\": \"\",\n+ \"SCRIPT_NAME\": scope.get(\"root_path\", \"\"),\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n", "issue": "Path prefix for PATH_INFO with WSGIMiddleware\nWhen mounting an app (e.g. Django) through `WSGIMiddleware` it seems to be required to also adjust `PATH_INFO` accordingly.\r\n\r\nWith the following, Django would e.g. redirect to \"/admin/\" (without the outer \"/dj\" prefix).\r\n\r\n```python\r\ndjapp = django.core.wsgi.get_wsgi_application()\r\napp.mount(\"/dj\", app=WSGIMiddleware(djapp))\r\n```\r\n\r\nI've got this working through an additional \"path_prefix\" argument for WSGIMiddleware, but I think it makes sense to either prepend the mount path in `build_environ` always (using `\"PATH_INFO\": scope[\"root_path\"] + scope[\"path\"]`, and making it visible to the mounted app though), or handle e.g. \"Location\" response headers by prepending it there.\r\nThe latter could likely result in redirect-loops, and does not help with Django POSTing to \"/admin/login\" then.\r\n\r\nI've not investigated too much into this yet, what do you think?\n", "before_files": [{"content": "import asyncio\nimport io\nimport sys\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.types import ASGIInstance, Message, Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": \"\",\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", []):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable, workers: int = 10) -> None:\n self.app = app\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n return WSGIResponder(self.app, scope)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n 
self.scope = scope\n self.status = None\n self.response_headers = None\n self.send_event = asyncio.Event()\n self.send_queue = [] # type: typing.List[typing.Optional[Message]]\n self.loop = asyncio.get_event_loop()\n self.response_started = False\n self.exc_info = None # type: typing.Any\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n try:\n sender = self.loop.create_task(self.sender(send))\n await run_in_threadpool(self.wsgi, environ, self.start_response)\n self.send_queue.append(None)\n self.send_event.set()\n await asyncio.wait_for(sender, None)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(\n self.exc_info[1], self.exc_info[2]\n )\n finally:\n if not sender.done():\n sender.cancel() # pragma: no cover\n\n async def sender(self, send: Send) -> None:\n while True:\n if self.send_queue:\n message = self.send_queue.pop(0)\n if message is None:\n return\n await send(message)\n else:\n await self.send_event.wait()\n self.send_event.clear()\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n }\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n self.send_queue.append(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True}\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n self.send_queue.append({\"type\": \"http.response.body\", \"body\": b\"\"})\n self.loop.call_soon_threadsafe(self.send_event.set)\n", "path": "starlette/middleware/wsgi.py"}], "after_files": [{"content": "import asyncio\nimport io\nimport sys\nimport typing\n\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.types import ASGIInstance, Message, Receive, Scope, Send\n\n\ndef build_environ(scope: Scope, body: bytes) -> dict:\n \"\"\"\n Builds a scope and request body into a WSGI environ object.\n \"\"\"\n environ = {\n \"REQUEST_METHOD\": scope[\"method\"],\n \"SCRIPT_NAME\": scope.get(\"root_path\", \"\"),\n \"PATH_INFO\": scope[\"path\"],\n \"QUERY_STRING\": scope[\"query_string\"].decode(\"ascii\"),\n \"SERVER_PROTOCOL\": f\"HTTP/{scope['http_version']}\",\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scope.get(\"scheme\", \"http\"),\n \"wsgi.input\": io.BytesIO(body),\n \"wsgi.errors\": sys.stdout,\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n\n # Get server name and port - required in WSGI, not in ASGI\n server = scope.get(\"server\") or (\"localhost\", 80)\n environ[\"SERVER_NAME\"] = server[0]\n environ[\"SERVER_PORT\"] = server[1]\n\n # Get client IP address\n if scope.get(\"client\"):\n environ[\"REMOTE_ADDR\"] = scope[\"client\"][0]\n\n # Go through headers and make them into environ entries\n for name, value in scope.get(\"headers\", 
[]):\n name = name.decode(\"latin1\")\n if name == \"content-length\":\n corrected_name = \"CONTENT_LENGTH\"\n elif name == \"content-type\":\n corrected_name = \"CONTENT_TYPE\"\n else:\n corrected_name = f\"HTTP_{name}\".upper().replace(\"-\", \"_\")\n # HTTPbis say only ASCII chars are allowed in headers, but we latin1 just in case\n value = value.decode(\"latin1\")\n if corrected_name in environ:\n value = environ[corrected_name] + \",\" + value\n environ[corrected_name] = value\n return environ\n\n\nclass WSGIMiddleware:\n def __init__(self, app: typing.Callable, workers: int = 10) -> None:\n self.app = app\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n return WSGIResponder(self.app, scope)\n\n\nclass WSGIResponder:\n def __init__(self, app: typing.Callable, scope: Scope) -> None:\n self.app = app\n self.scope = scope\n self.status = None\n self.response_headers = None\n self.send_event = asyncio.Event()\n self.send_queue = [] # type: typing.List[typing.Optional[Message]]\n self.loop = asyncio.get_event_loop()\n self.response_started = False\n self.exc_info = None # type: typing.Any\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n body = b\"\"\n more_body = True\n while more_body:\n message = await receive()\n body += message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n environ = build_environ(self.scope, body)\n try:\n sender = self.loop.create_task(self.sender(send))\n await run_in_threadpool(self.wsgi, environ, self.start_response)\n self.send_queue.append(None)\n self.send_event.set()\n await asyncio.wait_for(sender, None)\n if self.exc_info is not None:\n raise self.exc_info[0].with_traceback(\n self.exc_info[1], self.exc_info[2]\n )\n finally:\n if not sender.done():\n sender.cancel() # pragma: no cover\n\n async def sender(self, send: Send) -> None:\n while True:\n if self.send_queue:\n message = self.send_queue.pop(0)\n if message is None:\n return\n await send(message)\n else:\n await self.send_event.wait()\n self.send_event.clear()\n\n def start_response(\n self,\n status: str,\n response_headers: typing.List[typing.Tuple[str, str]],\n exc_info: typing.Any = None,\n ) -> None:\n self.exc_info = exc_info\n if not self.response_started:\n self.response_started = True\n status_code_string, _ = status.split(\" \", 1)\n status_code = int(status_code_string)\n headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in response_headers\n ]\n self.send_queue.append(\n {\n \"type\": \"http.response.start\",\n \"status\": status_code,\n \"headers\": headers,\n }\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n def wsgi(self, environ: dict, start_response: typing.Callable) -> None:\n for chunk in self.app(environ, start_response):\n self.send_queue.append(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True}\n )\n self.loop.call_soon_threadsafe(self.send_event.set)\n\n self.send_queue.append({\"type\": \"http.response.body\", \"body\": b\"\"})\n self.loop.call_soon_threadsafe(self.send_event.set)\n", "path": "starlette/middleware/wsgi.py"}]} | 1,947 | 125 |
gh_patches_debug_22474 | rasdani/github-patches | git_diff | tobymao__sqlglot-3385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't parse `trim` in TrinoSQL
**Fully reproducible code snippet**
Please include a fully reproducible code snippet or the input sql, dialect, and expected output.
```python
import sqlglot
print(sqlglot.__version__)
sql = "SELECT trim(',' FROM some_col);"
result = sqlglot.parse(sql, read="trino")
print(repr(result))
```
Expected:
```
23.12.2
[Select(
expressions=[
Trim(
this=Column(
this=Identifier(this=some_col, quoted=False)),
expression=Literal(this=,, is_string=True))])]
```
Got:
```
23.12.2
Traceback (most recent call last):
File "proof.py", line 7, in <module>
result = sqlglot.parse(sql, read="trino")
File ".../python3.8/site-packages/sqlglot/__init__.py", line 102, in parse
return Dialect.get_or_raise(read or dialect).parse(sql, **opts)
File ".../python3.8/site-packages/sqlglot/dialects/dialect.py", line 506, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
File ".../python3.8/site-packages/sqlglot/parser.py", line 1175, in parse
return self._parse(
File ".../python3.8/site-packages/sqlglot/parser.py", line 1241, in _parse
expressions.append(parse_method(self))
File ".../python3.8/site-packages/sqlglot/parser.py", line 1476, in _parse_statement
expression = self._parse_set_operations(expression) if expression else self._parse_select()
File ".../python3.8/site-packages/sqlglot/parser.py", line 2532, in _parse_select
projections = self._parse_projections()
File ".../python3.8/site-packages/sqlglot/parser.py", line 2480, in _parse_projections
return self._parse_expressions()
File ".../python3.8/site-packages/sqlglot/parser.py", line 5695, in _parse_expressions
return self._parse_csv(self._parse_expression)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5649, in _parse_csv
parse_result = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3805, in _parse_expression
return self._parse_alias(self._parse_conjunction())
File ".../python3.8/site-packages/sqlglot/parser.py", line 3808, in _parse_conjunction
return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3811, in _parse_equality
return self._parse_tokens(self._parse_comparison, self.EQUALITY)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3814, in _parse_comparison
return self._parse_tokens(self._parse_range, self.COMPARISON)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3817, in _parse_range
this = this or self._parse_bitwise()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3941, in _parse_bitwise
this = self._parse_term()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3973, in _parse_term
return self._parse_tokens(self._parse_factor, self.TERM)
File ".../python3.8/site-packages/sqlglot/parser.py", line 5663, in _parse_tokens
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3977, in _parse_factor
this = parse_method()
File ".../python3.8/site-packages/sqlglot/parser.py", line 3998, in _parse_unary
return self._parse_at_time_zone(self._parse_type())
File ".../python3.8/site-packages/sqlglot/parser.py", line 4020, in _parse_type
this = self._parse_column()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4220, in _parse_column
this = self._parse_column_reference()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4224, in _parse_column_reference
this = self._parse_field()
File ".../python3.8/site-packages/sqlglot/parser.py", line 4347, in _parse_field
field = self._parse_primary() or self._parse_function(
File ".../python3.8/site-packages/sqlglot/parser.py", line 4370, in _parse_function
func = self._parse_function_call(
File ".../python3.8/site-packages/sqlglot/parser.py", line 4458, in _parse_function_call
self._match_r_paren(this)
File ".../python3.8/site-packages/sqlglot/parser.py", line 6196, in _match_r_paren
self.raise_error("Expecting )")
File ".../python3.8/site-packages/sqlglot/parser.py", line 1285, in raise_error
raise error
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 20.
SELECT trim(',' FROM some_col);
```
**Official Documentation**
https://trino.io/docs/current/functions/string.html?highlight=trim#trim
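For context, sqlglot dialects usually handle the keyword form `TRIM(x FROM y)` by routing `TRIM` through a dedicated function parser rather than the generic argument-list path. Below is a rough sketch of what a dialect-level override could look like; it assumes `Presto.Parser._parse_trim` and the `trim_sql` generator helper exist as they do for other dialects, and it is not necessarily the project's final fix:

```python
from sqlglot import exp
from sqlglot.dialects.dialect import trim_sql
from sqlglot.dialects.presto import Presto


class PatchedTrino(Presto):
    class Parser(Presto.Parser):
        FUNCTION_PARSERS = {
            **Presto.Parser.FUNCTION_PARSERS,
            # Accept the standard-SQL form TRIM(',' FROM some_col)
            "TRIM": lambda self: self._parse_trim(),
        }

    class Generator(Presto.Generator):
        TRANSFORMS = {
            **Presto.Generator.TRANSFORMS,
            # Round-trip exp.Trim back to TRIM(... FROM ...) syntax
            exp.Trim: trim_sql,
        }
```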
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/trino.py`
Content:
```
1 from __future__ import annotations
2
3 from sqlglot import exp
4 from sqlglot.dialects.dialect import merge_without_target_sql
5 from sqlglot.dialects.presto import Presto
6
7
8 class Trino(Presto):
9 SUPPORTS_USER_DEFINED_TYPES = False
10 LOG_BASE_FIRST = True
11
12 class Generator(Presto.Generator):
13 TRANSFORMS = {
14 **Presto.Generator.TRANSFORMS,
15 exp.ArraySum: lambda self,
16 e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
17 exp.Merge: merge_without_target_sql,
18 }
19
20 SUPPORTED_JSON_PATH_PARTS = {
21 exp.JSONPathKey,
22 exp.JSONPathRoot,
23 exp.JSONPathSubscript,
24 }
25
26 class Tokenizer(Presto.Tokenizer):
27 HEX_STRINGS = [("X'", "'")]
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/trino.py b/sqlglot/dialects/trino.py
--- a/sqlglot/dialects/trino.py
+++ b/sqlglot/dialects/trino.py
@@ -1,7 +1,7 @@
from __future__ import annotations
from sqlglot import exp
-from sqlglot.dialects.dialect import merge_without_target_sql
+from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql
from sqlglot.dialects.presto import Presto
@@ -9,12 +9,19 @@
SUPPORTS_USER_DEFINED_TYPES = False
LOG_BASE_FIRST = True
+ class Parser(Presto.Parser):
+ FUNCTION_PARSERS = {
+ **Presto.Parser.FUNCTION_PARSERS,
+ "TRIM": lambda self: self._parse_trim(),
+ }
+
class Generator(Presto.Generator):
TRANSFORMS = {
**Presto.Generator.TRANSFORMS,
exp.ArraySum: lambda self,
e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
exp.Merge: merge_without_target_sql,
+ exp.Trim: trim_sql,
}
SUPPORTED_JSON_PATH_PARTS = {
| {"golden_diff": "diff --git a/sqlglot/dialects/trino.py b/sqlglot/dialects/trino.py\n--- a/sqlglot/dialects/trino.py\n+++ b/sqlglot/dialects/trino.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n from sqlglot import exp\n-from sqlglot.dialects.dialect import merge_without_target_sql\n+from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql\n from sqlglot.dialects.presto import Presto\n \n \n@@ -9,12 +9,19 @@\n SUPPORTS_USER_DEFINED_TYPES = False\n LOG_BASE_FIRST = True\n \n+ class Parser(Presto.Parser):\n+ FUNCTION_PARSERS = {\n+ **Presto.Parser.FUNCTION_PARSERS,\n+ \"TRIM\": lambda self: self._parse_trim(),\n+ }\n+\n class Generator(Presto.Generator):\n TRANSFORMS = {\n **Presto.Generator.TRANSFORMS,\n exp.ArraySum: lambda self,\n e: f\"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)\",\n exp.Merge: merge_without_target_sql,\n+ exp.Trim: trim_sql,\n }\n \n SUPPORTED_JSON_PATH_PARTS = {\n", "issue": "Can't parse `trim` in TrinoSQL\n**Fully reproducible code snippet**\r\nPlease include a fully reproducible code snippet or the input sql, dialect, and expected output.\r\n```python\r\nimport sqlglot\r\nprint(sqlglot.__version__)\r\nsql = \"SELECT trim(',' FROM some_col);\"\r\nresult = sqlglot.parse(sql, read=\"trino\")\r\nprint(repr(result))\r\n```\r\n\r\nExpected:\r\n```\r\n23.12.2\r\n[Select(\r\n expressions=[\r\n Trim(\r\n this=Column(\r\n this=Identifier(this=some_col, quoted=False)),\r\n expression=Literal(this=,, is_string=True))])]\r\n```\r\n\r\nGot:\r\n```\r\n23.12.2\r\nTraceback (most recent call last):\r\n File \"proof.py\", line 7, in <module>\r\n result = sqlglot.parse(sql, read=\"trino\")\r\n File \".../python3.8/site-packages/sqlglot/__init__.py\", line 102, in parse\r\n return Dialect.get_or_raise(read or dialect).parse(sql, **opts)\r\n File \".../python3.8/site-packages/sqlglot/dialects/dialect.py\", line 506, in parse\r\n return self.parser(**opts).parse(self.tokenize(sql), sql)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1175, in parse\r\n return self._parse(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1241, in _parse\r\n expressions.append(parse_method(self))\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1476, in _parse_statement\r\n expression = self._parse_set_operations(expression) if expression else self._parse_select()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 2532, in _parse_select\r\n projections = self._parse_projections()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 2480, in _parse_projections\r\n return self._parse_expressions()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5695, in _parse_expressions\r\n return self._parse_csv(self._parse_expression)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5649, in _parse_csv\r\n parse_result = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3805, in _parse_expression\r\n return self._parse_alias(self._parse_conjunction())\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3808, in _parse_conjunction\r\n return self._parse_tokens(self._parse_equality, self.CONJUNCTION)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3811, in _parse_equality\r\n return self._parse_tokens(self._parse_comparison, self.EQUALITY)\r\n File 
\".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3814, in _parse_comparison\r\n return self._parse_tokens(self._parse_range, self.COMPARISON)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3817, in _parse_range\r\n this = this or self._parse_bitwise()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3941, in _parse_bitwise\r\n this = self._parse_term()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3973, in _parse_term\r\n return self._parse_tokens(self._parse_factor, self.TERM)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 5663, in _parse_tokens\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3977, in _parse_factor\r\n this = parse_method()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 3998, in _parse_unary\r\n return self._parse_at_time_zone(self._parse_type())\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4020, in _parse_type\r\n this = self._parse_column()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4220, in _parse_column\r\n this = self._parse_column_reference()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4224, in _parse_column_reference\r\n this = self._parse_field()\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4347, in _parse_field\r\n field = self._parse_primary() or self._parse_function(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4370, in _parse_function\r\n func = self._parse_function_call(\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 4458, in _parse_function_call\r\n self._match_r_paren(this)\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 6196, in _match_r_paren\r\n self.raise_error(\"Expecting )\")\r\n File \".../python3.8/site-packages/sqlglot/parser.py\", line 1285, in raise_error\r\n raise error\r\nsqlglot.errors.ParseError: Expecting ). 
Line 1, Col: 20.\r\n SELECT trim(',' FROM some_col);\r\n```\r\n\r\n**Official Documentation**\r\nhttps://trino.io/docs/current/functions/string.html?highlight=trim#trim\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import merge_without_target_sql\nfrom sqlglot.dialects.presto import Presto\n\n\nclass Trino(Presto):\n SUPPORTS_USER_DEFINED_TYPES = False\n LOG_BASE_FIRST = True\n\n class Generator(Presto.Generator):\n TRANSFORMS = {\n **Presto.Generator.TRANSFORMS,\n exp.ArraySum: lambda self,\n e: f\"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)\",\n exp.Merge: merge_without_target_sql,\n }\n\n SUPPORTED_JSON_PATH_PARTS = {\n exp.JSONPathKey,\n exp.JSONPathRoot,\n exp.JSONPathSubscript,\n }\n\n class Tokenizer(Presto.Tokenizer):\n HEX_STRINGS = [(\"X'\", \"'\")]\n", "path": "sqlglot/dialects/trino.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.dialect import merge_without_target_sql, trim_sql\nfrom sqlglot.dialects.presto import Presto\n\n\nclass Trino(Presto):\n SUPPORTS_USER_DEFINED_TYPES = False\n LOG_BASE_FIRST = True\n\n class Parser(Presto.Parser):\n FUNCTION_PARSERS = {\n **Presto.Parser.FUNCTION_PARSERS,\n \"TRIM\": lambda self: self._parse_trim(),\n }\n\n class Generator(Presto.Generator):\n TRANSFORMS = {\n **Presto.Generator.TRANSFORMS,\n exp.ArraySum: lambda self,\n e: f\"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)\",\n exp.Merge: merge_without_target_sql,\n exp.Trim: trim_sql,\n }\n\n SUPPORTED_JSON_PATH_PARTS = {\n exp.JSONPathKey,\n exp.JSONPathRoot,\n exp.JSONPathSubscript,\n }\n\n class Tokenizer(Presto.Tokenizer):\n HEX_STRINGS = [(\"X'\", \"'\")]\n", "path": "sqlglot/dialects/trino.py"}]} | 1,899 | 294 |
gh_patches_debug_37696 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Moving a foreign key column throws an error.
## Description
I'm not able to extract a set of columns when one of the columns is a foreign key (FK). Specifically, this is step 18 of our library workflow. I get an unhandled exception:
> Foreign key associated with column 'Publications.Authors_id' could not find table 'Library.Authors' with which to generate a foreign key to target column 'id'
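For what it's worth, that message is SQLAlchemy's standard `NoReferencedTableError`: it appears when a `ForeignKey` is resolved against a `MetaData` collection that does not contain the referenced table. A minimal, self-contained reproduction is below (table names follow the error above; this is only a sketch of the suspected mechanism, not Mathesar code):

```python
from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table

# "Authors" is registered in one MetaData...
authors_md = MetaData(schema="Library")
Table("Authors", authors_md, Column("id", Integer, primary_key=True))

# ...but "Publications" is built against a different MetaData, so the FK
# target table is not registered where SQLAlchemy looks for it.
pubs_md = MetaData(schema="Library")
pubs = Table(
    "Publications",
    pubs_md,
    Column("Authors_id", Integer, ForeignKey("Library.Authors.id")),
)

try:
    # Resolving the FK target triggers the lookup and fails.
    list(pubs.c.Authors_id.foreign_keys)[0].column
except Exception as exc:
    print(exc)  # "Foreign key associated with column 'Publications.Authors_id'
                #  could not find table 'Library.Authors' ..."
```

If the extraction code creates the new table with a fresh `MetaData` instead of the one that already holds the referenced table, this is exactly the failure mode one would expect — presumably why `create_mathesar_table` below accepts an optional shared `metadata` argument.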
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/tables/operations/create.py`
Content:
```
1 from sqlalchemy import Column, TEXT, Table, MetaData
2 from sqlalchemy.ext import compiler
3 from sqlalchemy.schema import DDLElement
4
5 from db.columns.utils import init_mathesar_table_column_list_with_defaults
6 from db.schemas.operations.create import create_schema
7
8
9 def create_mathesar_table(name, schema, columns, engine, metadata=None):
10 """
11 This method creates a Postgres table in the specified schema using the
12 given name and column list. It adds internal mathesar columns to the
13 table.
14 """
15 columns = init_mathesar_table_column_list_with_defaults(columns)
16 create_schema(schema, engine)
17 # We need this so that we can create multiple mathesar tables in the
18 # same MetaData, enabling them to reference each other in the
19 # SQLAlchemy context (e.g., for creating a ForeignKey relationship)
20 if metadata is None:
21 metadata = MetaData(bind=engine, schema=schema)
22 table = Table(
23 name,
24 metadata,
25 *columns,
26 schema=schema
27 )
28 table.create(engine)
29 return table
30
31
32 def create_string_column_table(name, schema, column_names, engine):
33 """
34 This method creates a Postgres table in the specified schema, with all
35 columns being String type.
36 """
37 columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]
38 table = create_mathesar_table(name, schema, columns_, engine)
39 return table
40
41
42 class CreateTableAs(DDLElement):
43 def __init__(self, name, selectable):
44 self.name = name
45 self.selectable = selectable
46
47
48 @compiler.compiles(CreateTableAs)
49 def compile_create_table_as(element, compiler, **_):
50 return "CREATE TABLE %s AS (%s)" % (
51 element.name,
52 compiler.sql_compiler.process(element.selectable, literal_binds=True),
53 )
54
```
Path: `mathesar/api/serializers/tables.py`
Content:
```
1 from django.urls import reverse
2 from psycopg2.errors import DuplicateTable
3 from rest_framework import serializers, status
4 from rest_framework.exceptions import ValidationError
5 from sqlalchemy.exc import ProgrammingError
6
7 from db.types.operations.convert import get_db_type_enum_from_id
8
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,
11 MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier,
12 )
13 from mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException
14 from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
15 from mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions
16 from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
17 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
18 from mathesar.api.serializers.columns import SimpleColumnSerializer
19 from mathesar.models.base import Column, Table, DataFile
20 from mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table
21
22
23 class TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
24 columns = SimpleColumnSerializer(many=True, required=False)
25 records_url = serializers.SerializerMethodField()
26 constraints_url = serializers.SerializerMethodField()
27 columns_url = serializers.SerializerMethodField()
28 joinable_tables_url = serializers.SerializerMethodField()
29 type_suggestions_url = serializers.SerializerMethodField()
30 previews_url = serializers.SerializerMethodField()
31 name = serializers.CharField(required=False, allow_blank=True, default='')
32 import_target = serializers.PrimaryKeyRelatedField(
33 required=False, allow_null=True, queryset=Table.current_objects.all()
34 )
35 data_files = serializers.PrimaryKeyRelatedField(
36 required=False, many=True, queryset=DataFile.objects.all()
37 )
38
39 class Meta:
40 model = Table
41 fields = ['id', 'name', 'import_target', 'schema', 'created_at', 'updated_at', 'import_verified',
42 'columns', 'records_url', 'constraints_url', 'columns_url',
43 'joinable_tables_url', 'type_suggestions_url', 'previews_url',
44 'data_files', 'has_dependencies']
45
46 def get_records_url(self, obj):
47 if isinstance(obj, Table):
48 # Only get records if we are serializing an existing table
49 request = self.context['request']
50 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
51 else:
52 return None
53
54 def get_constraints_url(self, obj):
55 if isinstance(obj, Table):
56 # Only get constraints if we are serializing an existing table
57 request = self.context['request']
58 return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))
59 else:
60 return None
61
62 def get_columns_url(self, obj):
63 if isinstance(obj, Table):
64 # Only get columns if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70 def get_joinable_tables_url(self, obj):
71 if isinstance(obj, Table):
72 # Only get type suggestions if we are serializing an existing table
73 request = self.context['request']
74 return request.build_absolute_uri(reverse('table-joinable-tables', kwargs={'pk': obj.pk}))
75 else:
76 return None
77
78 def get_type_suggestions_url(self, obj):
79 if isinstance(obj, Table):
80 # Only get type suggestions if we are serializing an existing table
81 request = self.context['request']
82 return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))
83 else:
84 return None
85
86 def get_previews_url(self, obj):
87 if isinstance(obj, Table):
88 # Only get previews if we are serializing an existing table
89 request = self.context['request']
90 return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))
91 else:
92 return None
93
94 def validate_data_files(self, data_files):
95 if data_files and len(data_files) > 1:
96 raise MultipleDataFileAPIException()
97 return data_files
98
99 def create(self, validated_data):
100 schema = validated_data['schema']
101 data_files = validated_data.get('data_files')
102 name = validated_data.get('name') or gen_table_name(schema, data_files)
103 import_target = validated_data.get('import_target', None)
104
105 try:
106 if data_files:
107 table = create_table_from_datafile(data_files, name, schema)
108 if import_target:
109 table.import_target = import_target
110 table.is_temp = True
111 table.save()
112 else:
113 table = create_empty_table(name, schema)
114 except ProgrammingError as e:
115 if type(e.orig) == DuplicateTable:
116 raise DuplicateTableAPIException(
117 e,
118 message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
119 field="name",
120 status_code=status.HTTP_400_BAD_REQUEST
121 )
122 else:
123 raise ProgrammingAPIException(e)
124 return table
125
126 def update(self, instance, validated_data):
127 if self.partial:
128 # Save the fields that are stored in the model.
129 present_model_fields = []
130 for model_field in instance.MODEL_FIELDS:
131 if model_field in validated_data:
132 setattr(instance, model_field, validated_data[model_field])
133 present_model_fields.append(model_field)
134 instance.save(update_fields=present_model_fields)
135 for key in present_model_fields:
136 del validated_data[key]
137 # Save the fields that are stored in the underlying DB.
138 try:
139 instance.update_sa_table(validated_data)
140 except ValueError as e:
141 raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
142 return instance
143
144 def validate(self, data):
145 if self.partial:
146 columns = data.get('columns', None)
147 if columns is not None:
148 for col in columns:
149 id = col.get('id', None)
150 if id is None:
151 message = "'id' field is required while batch updating columns."
152 raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)
153 return data
154
155
156 class TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):
157 name = serializers.CharField(required=False)
158 columns = SimpleColumnSerializer(many=True)
159
160 def validate_columns(self, columns):
161 table = self.context['table']
162 column_names = [col["name"] for col in columns]
163 if not len(column_names) == len(set(column_names)):
164 raise DistinctColumnRequiredAPIException()
165 if not len(columns) == len(table.sa_columns):
166 raise ColumnSizeMismatchAPIException()
167 for column in columns:
168 db_type_id = column['type']
169 db_type = get_db_type_enum_from_id(db_type_id)
170 if db_type is None:
171 raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)
172 return columns
173
174
175 class MoveTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):
176 move_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
177 target_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
178
179
180 class SplitTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):
181 extract_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
182 extracted_table_name = serializers.CharField()
183
184
185 class SplitTableResponseSerializer(MathesarErrorMessageMixin, serializers.Serializer):
186 extracted_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
187 remainder_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())
188
189
190 class MappingSerializer(MathesarErrorMessageMixin, serializers.Serializer):
191 # TBD
192 pass
193
194
195 class TableImportSerializer(MathesarErrorMessageMixin, serializers.Serializer):
196 import_target = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all(), required=True)
197 data_files = serializers.PrimaryKeyRelatedField(required=True, many=True, queryset=DataFile.objects.all())
198 mappings = MappingSerializer(required=True, allow_null=True)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py
--- a/db/tables/operations/create.py
+++ b/db/tables/operations/create.py
@@ -19,6 +19,11 @@
# SQLAlchemy context (e.g., for creating a ForeignKey relationship)
if metadata is None:
metadata = MetaData(bind=engine, schema=schema)
+ metadata.reflect()
+ # The exception raised by SQLAlchemy upon hitting a duplicate table in the
+ # schema is non-specific.
+ if (name, schema) in [(t.name, t.schema) for t in metadata.sorted_tables]:
+ raise DuplicateTable
table = Table(
name,
metadata,
@@ -29,6 +34,10 @@
return table
+class DuplicateTable(Exception):
+ pass
+
+
def create_string_column_table(name, schema, column_names, engine):
"""
This method creates a Postgres table in the specified schema, with all
diff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py
--- a/mathesar/api/serializers/tables.py
+++ b/mathesar/api/serializers/tables.py
@@ -1,10 +1,10 @@
from django.urls import reverse
-from psycopg2.errors import DuplicateTable
from rest_framework import serializers, status
from rest_framework.exceptions import ValidationError
from sqlalchemy.exc import ProgrammingError
from db.types.operations.convert import get_db_type_enum_from_id
+from db.tables.operations.create import DuplicateTable
from mathesar.api.exceptions.validation_exceptions.exceptions import (
ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,
@@ -111,16 +111,15 @@
table.save()
else:
table = create_empty_table(name, schema)
+ except DuplicateTable as e:
+ raise DuplicateTableAPIException(
+ e,
+ message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
+ field="name",
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
except ProgrammingError as e:
- if type(e.orig) == DuplicateTable:
- raise DuplicateTableAPIException(
- e,
- message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
- field="name",
- status_code=status.HTTP_400_BAD_REQUEST
- )
- else:
- raise ProgrammingAPIException(e)
+ raise ProgrammingAPIException(e)
return table
def update(self, instance, validated_data):
| {"golden_diff": "diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py\n--- a/db/tables/operations/create.py\n+++ b/db/tables/operations/create.py\n@@ -19,6 +19,11 @@\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n+ metadata.reflect()\n+ # The exception raised by SQLAlchemy upon hitting a duplicate table in the\n+ # schema is non-specific.\n+ if (name, schema) in [(t.name, t.schema) for t in metadata.sorted_tables]:\n+ raise DuplicateTable\n table = Table(\n name,\n metadata,\n@@ -29,6 +34,10 @@\n return table\n \n \n+class DuplicateTable(Exception):\n+ pass\n+\n+\n def create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\ndiff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py\n--- a/mathesar/api/serializers/tables.py\n+++ b/mathesar/api/serializers/tables.py\n@@ -1,10 +1,10 @@\n from django.urls import reverse\n-from psycopg2.errors import DuplicateTable\n from rest_framework import serializers, status\n from rest_framework.exceptions import ValidationError\n from sqlalchemy.exc import ProgrammingError\n \n from db.types.operations.convert import get_db_type_enum_from_id\n+from db.tables.operations.create import DuplicateTable\n \n from mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n@@ -111,16 +111,15 @@\n table.save()\n else:\n table = create_empty_table(name, schema)\n+ except DuplicateTable as e:\n+ raise DuplicateTableAPIException(\n+ e,\n+ message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n+ field=\"name\",\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except ProgrammingError as e:\n- if type(e.orig) == DuplicateTable:\n- raise DuplicateTableAPIException(\n- e,\n- message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n- field=\"name\",\n- status_code=status.HTTP_400_BAD_REQUEST\n- )\n- else:\n- raise ProgrammingAPIException(e)\n+ raise ProgrammingAPIException(e)\n return table\n \n def update(self, instance, validated_data):\n", "issue": "Moving Foreign key column throws up an error. \n## Description\r\nI'm not able to extract a set of columns where one column is a FK. Specifically, this is step 18 on our library workflow. I get an unhandled exception:\r\n\r\n> Foreign key associated with column 'Publications.Authors_id' could not find table 'Library.Authors' with which to generate a foreign key to target column 'id'\r\n\r\n\n", "before_files": [{"content": "from sqlalchemy import Column, TEXT, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. 
It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}, {"content": "from django.urls import reverse\nfrom psycopg2.errors import DuplicateTable\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.types.operations.convert import get_db_type_enum_from_id\n\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier,\n)\nfrom mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.columns import SimpleColumnSerializer\nfrom mathesar.models.base import Column, Table, DataFile\nfrom mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table\n\n\nclass TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n joinable_tables_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n import_target = serializers.PrimaryKeyRelatedField(\n required=False, allow_null=True, queryset=Table.current_objects.all()\n )\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'import_target', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'joinable_tables_url', 'type_suggestions_url', 
'previews_url',\n 'data_files', 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_joinable_tables_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-joinable-tables', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise MultipleDataFileAPIException()\n return data_files\n\n def create(self, validated_data):\n schema = validated_data['schema']\n data_files = validated_data.get('data_files')\n name = validated_data.get('name') or gen_table_name(schema, data_files)\n import_target = validated_data.get('import_target', None)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n if import_target:\n table.import_target = import_target\n table.is_temp = True\n table.save()\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise DuplicateTableAPIException(\n e,\n message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n field=\"name\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise ProgrammingAPIException(e)\n return table\n\n def update(self, instance, validated_data):\n if self.partial:\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in instance.MODEL_FIELDS:\n if model_field in validated_data:\n setattr(instance, model_field, validated_data[model_field])\n present_model_fields.append(model_field)\n instance.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del validated_data[key]\n # Save the fields that are stored in the underlying DB.\n try:\n instance.update_sa_table(validated_data)\n except ValueError as e:\n raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return instance\n\n def validate(self, data):\n if self.partial:\n columns = data.get('columns', None)\n if columns is not None:\n for col in columns:\n id = 
col.get('id', None)\n if id is None:\n message = \"'id' field is required while batch updating columns.\"\n raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)\n return data\n\n\nclass TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n def validate_columns(self, columns):\n table = self.context['table']\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise DistinctColumnRequiredAPIException()\n if not len(columns) == len(table.sa_columns):\n raise ColumnSizeMismatchAPIException()\n for column in columns:\n db_type_id = column['type']\n db_type = get_db_type_enum_from_id(db_type_id)\n if db_type is None:\n raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)\n return columns\n\n\nclass MoveTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n move_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n target_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass SplitTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extract_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n extracted_table_name = serializers.CharField()\n\n\nclass SplitTableResponseSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extracted_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n remainder_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass MappingSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n # TBD\n pass\n\n\nclass TableImportSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n import_target = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all(), required=True)\n data_files = serializers.PrimaryKeyRelatedField(required=True, many=True, queryset=DataFile.objects.all())\n mappings = MappingSerializer(required=True, allow_null=True)\n", "path": "mathesar/api/serializers/tables.py"}], "after_files": [{"content": "from sqlalchemy import Column, TEXT, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. 
It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n metadata.reflect()\n # The exception raised by SQLAlchemy upon hitting a duplicate table in the\n # schema is non-specific.\n if (name, schema) in [(t.name, t.schema) for t in metadata.sorted_tables]:\n raise DuplicateTable\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\nclass DuplicateTable(Exception):\n pass\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}, {"content": "from django.urls import reverse\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.types.operations.convert import get_db_type_enum_from_id\nfrom db.tables.operations.create import DuplicateTable\n\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier,\n)\nfrom mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.columns import SimpleColumnSerializer\nfrom mathesar.models.base import Column, Table, DataFile\nfrom mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table\n\n\nclass TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n joinable_tables_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n import_target = serializers.PrimaryKeyRelatedField(\n required=False, allow_null=True, queryset=Table.current_objects.all()\n )\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, 
queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'import_target', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'joinable_tables_url', 'type_suggestions_url', 'previews_url',\n 'data_files', 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_joinable_tables_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-joinable-tables', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise MultipleDataFileAPIException()\n return data_files\n\n def create(self, validated_data):\n schema = validated_data['schema']\n data_files = validated_data.get('data_files')\n name = validated_data.get('name') or gen_table_name(schema, data_files)\n import_target = validated_data.get('import_target', None)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n if import_target:\n table.import_target = import_target\n table.is_temp = True\n table.save()\n else:\n table = create_empty_table(name, schema)\n except DuplicateTable as e:\n raise DuplicateTableAPIException(\n e,\n message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n field=\"name\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except ProgrammingError as e:\n raise ProgrammingAPIException(e)\n return table\n\n def update(self, instance, validated_data):\n if self.partial:\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in instance.MODEL_FIELDS:\n if model_field in validated_data:\n setattr(instance, model_field, validated_data[model_field])\n present_model_fields.append(model_field)\n instance.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del validated_data[key]\n # Save the fields that are stored in the underlying DB.\n try:\n instance.update_sa_table(validated_data)\n except 
ValueError as e:\n raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return instance\n\n def validate(self, data):\n if self.partial:\n columns = data.get('columns', None)\n if columns is not None:\n for col in columns:\n id = col.get('id', None)\n if id is None:\n message = \"'id' field is required while batch updating columns.\"\n raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)\n return data\n\n\nclass TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n def validate_columns(self, columns):\n table = self.context['table']\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise DistinctColumnRequiredAPIException()\n if not len(columns) == len(table.sa_columns):\n raise ColumnSizeMismatchAPIException()\n for column in columns:\n db_type_id = column['type']\n db_type = get_db_type_enum_from_id(db_type_id)\n if db_type is None:\n raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)\n return columns\n\n\nclass MoveTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n move_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n target_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass SplitTableRequestSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extract_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n extracted_table_name = serializers.CharField()\n\n\nclass SplitTableResponseSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n extracted_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n remainder_table = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all())\n\n\nclass MappingSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n # TBD\n pass\n\n\nclass TableImportSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n import_target = serializers.PrimaryKeyRelatedField(queryset=Table.current_objects.all(), required=True)\n data_files = serializers.PrimaryKeyRelatedField(required=True, many=True, queryset=DataFile.objects.all())\n mappings = MappingSerializer(required=True, allow_null=True)\n", "path": "mathesar/api/serializers/tables.py"}]} | 3,046 | 561 |
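The patch shown above for `db/tables/operations/create.py` replaces a driver-level check (psycopg2's `DuplicateTable`, surfaced inside a SQLAlchemy `ProgrammingError`) with an explicit pre-check: reflect the schema and raise a library-level `DuplicateTable` before attempting the CREATE, so the serializer can catch one specific exception. A minimal, self-contained sketch of that reflect-then-check pattern is below; the engine, table, and column names are illustrative and are not Mathesar's API.

```python
# Illustrative sketch of the "reflect, then pre-check for duplicates" pattern;
# uses an in-memory SQLite engine so it runs standalone.
from sqlalchemy import Column, Integer, MetaData, Table, create_engine


class DuplicateTable(Exception):
    """Raised when a table with the requested name already exists."""


def create_table_if_absent(name, engine):
    metadata = MetaData()
    metadata.reflect(bind=engine)  # load the tables that already exist
    if name in [t.name for t in metadata.sorted_tables]:
        # Specific, catchable failure instead of a generic ProgrammingError
        # raised deep inside the database driver.
        raise DuplicateTable(name)
    table = Table(name, metadata, Column("id", Integer, primary_key=True))
    table.create(engine)
    return table


if __name__ == "__main__":
    engine = create_engine("sqlite://")
    create_table_if_absent("books", engine)
    try:
        create_table_if_absent("books", engine)
    except DuplicateTable as exc:
        print(f"table {exc} already exists")
```

The `reflect()` call is what makes the pre-check meaningful: it loads tables created outside this `MetaData` instance, which is exactly the case that previously only surfaced as a `ProgrammingError` at CREATE time.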
gh_patches_debug_6902 | rasdani/github-patches | git_diff | napari__napari-3929 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
missing doc items (may be copy-docs problem)
## 🐛 Bug
I was looking at napari.org after seeing #3916 get merged (a big update to plugin docs), and it looks like the toc is missing items. This seems to have something to do with the copy-docs script.
In particular,
* it looks like files that are not in the toc are missing
* some items in the toc go missing
* the order of the items in the toc seems to change from the authored order
@tlambert03 please feel free to add detail
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/copy-docs.py`
Content:
```
1 """
2 Copy docs from the napari repo into the napari.github.io repo
3 and update the table of contents.
4 By default, will assume that there is a folder named napari.github.io
5 in the same directory as the napari folder, if not a different copy
6 destination can be provided.
7
8 Read ORGANIZATION.md to learn more about how the documentation sources
9 are organized, and how everything comes together.
10
11 python -m copy-docs [dstdir]
12 """
13
14 import copy
15 import os
16 import os.path as osp
17 import shutil
18 import sys
19 from fnmatch import fnmatch
20
21 import yaml
22
23 # path to copy and locations to copy to if different
24 TO_COPY = [
25 'ORGANIZATION.md',
26 'glossary.md',
27 'developers',
28 'community',
29 'howtos',
30 'release',
31 'roadmaps',
32 'images',
33 osp.join('_templates', 'autosummary'),
34 *[
35 (dire, osp.join(dire, 'stable'))
36 for dire in ('api', 'guides', 'plugins')
37 ],
38 ]
39
40 # paths to ignore
41 IGNORE = [
42 osp.join('images', 'logo.png'),
43 ]
44
45 SRC = osp.dirname(__file__)
46
47 DOC_EXTS = ['.md', '.rst', '.ipynb']
48
49 TOC_IGNORE = [
50 'api/stable',
51 'images',
52 '_templates',
53 'ORGANIZATION.md',
54 'glossary.md', # this document will still be at the destination ToC
55 'guides/stable/_layer_events.md',
56 'guides/stable/_viewer_events.md',
57 ]
58
59
60 def exclude_filter(path):
61 """Exclude files in the ignore list and duplicated files."""
62 for ignore in IGNORE:
63 if fnmatch(path, osp.join(SRC, ignore)): # in ignore list
64 return True
65 else:
66 if osp.isdir(path) or osp.splitext(path)[1] != '.md':
67 return False
68 with open(path) as f:
69 firstline = f.readline()
70 return firstline.startswith('```{include}') # duplicate file
71
72
73 def copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):
74 """Copy a path from the source directory to the destination directory,
75 with the given path relative to the directory roots.
76
77 Parameters
78 ----------
79 srcdir : path-like
80 Source directory root to copy from.
81 dstdir : path-like
82 Destination directory root to copy to.
83 path : path-like
84 Path relative to the `srcdir` of the path to copy from.
85 newpath : path-like, optional
86 Path relative to the `dstdir` of the path to copy to.
87 If not provided, will default to the value of `path`.
88 exclude : function(path-like) -> bool, keyword-only, optional
89 Conditional function on whether to exclude the given path.
90
91 Returns
92 -------
93 files : list of path-like
94 Paths of the copied files.
95 """
96 if newpath is None:
97 newpath = path
98
99 src = osp.join(srcdir, path)
100 dst = osp.join(dstdir, newpath)
101
102 if exclude(src): # skip this path
103 return []
104
105 print(f'copying {src} to {dst}')
106
107 if osp.isfile(src):
108 shutil.copyfile(src, dst)
109 return [newpath]
110 elif osp.isdir(src):
111 if osp.exists(dst): # if the destination directory exists, delete it
112 shutil.rmtree(dst)
113
114 os.mkdir(dst)
115
116 files = []
117
118 for fpath in os.listdir(src): # recursively copy each child path
119 p = osp.join(path, fpath)
120 np = osp.join(newpath, fpath)
121 files += copy_path(srcdir, dstdir, p, np, exclude=exclude)
122
123 return files
124 else:
125 raise RuntimeError(f'unknown path type {src}')
126
127
128 def copy_paths(src, dst, paths, *, exclude=None):
129 """Copy files/directories given a list of their paths from
130 the source directory to the destination directory.
131
132 Parameters
133 ----------
134 src : path-like
135 Source directory to copy from.
136 dst : path-like
137 Destination directory to copy to.
138 paths : list of (path-like or 2-tuple of path-like)
139 Paths of the files/directories to copy relative to the source directory.
140 Pairs of paths in the list signify that the path to copy to is different
141 than the path copied from.
142 exclude : function(path-like) -> bool, keyword-only, optional
143 Conditional function on whether to exclude the given path.
144
145 Returns
146 -------
147 files : list of path-like
148 Paths of the copied files.
149 """
150 files = []
151
152 for path in paths:
153 if isinstance(path, tuple):
154 files += copy_path(src, dst, path[0], path[1], exclude=exclude)
155 else:
156 files += copy_path(src, dst, path, exclude=exclude)
157
158 return files
159
160
161 def update_toc(toc, paths, ignore=[]):
162 """Update the table of contents according to the paths of all files copied over.
163
164 Parameters
165 ----------
166 toc : JSON
167 Table of contents according to the JupyterBook specification.
168 paths : list of path-like
169 Paths of the files copied over.
170 ignore : list of path-like
171 List of directories to ignore when updating the table of contents.
172
173 Returns
174 -------
175 new_toc : JSON
176 Updated table of contents.
177 """
178 new_toc = copy.deepcopy(toc)
179
180 remaining_paths = []
181
182 # remove all paths in ignore list and those with the wrong extension
183 for path in paths:
184 base, ext = osp.splitext(path)
185
186 for prefix in ignore: # check if path should be ignored
187 if path.startswith(prefix):
188 break
189 else: # not on the ignore list
190 if ext in DOC_EXTS: # relevant filetype
191 remaining_paths.append(
192 base
193 ) # the toc does not include extensions
194
195 chapters = new_toc[1]['chapters']
196
197 for chapter in chapters:
198 if (
199 'file' not in chapter
200 or (index := chapter['file']) not in remaining_paths
201 ):
202 continue # skip irrelevant chapters
203
204 parent_dir = osp.dirname(index)
205 remaining_paths.remove(index)
206
207 sections = chapter['sections']
208 files = [section['file'] for section in sections]
209
210 # find and remove deleted files from toc
211 j = 0
212 for path in files:
213 if path in remaining_paths:
214 remaining_paths.remove(path)
215 j += 1
216 else:
217 print(f'deleting {path} from toc')
218 del sections[j] # delete from toc
219
220 new_files = filter(
221 lambda path: path.startswith(parent_dir), remaining_paths
222 )
223 for path in new_files:
224 print(f'adding {path} to toc')
225 sections.append({'file': path})
226 remaining_paths.remove(path)
227
228 return new_toc
229
230
231 def main(args):
232 dst = osp.join(
233 osp.dirname(osp.dirname(osp.dirname(__file__))), 'napari.github.io'
234 )
235
236 try:
237 dst = args[1]
238 except IndexError:
239 pass
240
241 files = copy_paths(SRC, dst, TO_COPY, exclude=exclude_filter)
242 toc_file = osp.join(dst, '_toc.yml')
243
244 with open(toc_file) as f:
245 toc = yaml.safe_load(f)
246
247 if toc is None:
248 print(f'toc file {toc_file} empty')
249 return
250
251 new_toc = update_toc(toc, files, TOC_IGNORE)
252
253 with open(toc_file, 'w') as f:
254 yaml.dump(new_toc, f)
255
256
257 if __name__ == '__main__':
258 main(sys.argv)
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/copy-docs.py b/docs/copy-docs.py
--- a/docs/copy-docs.py
+++ b/docs/copy-docs.py
@@ -54,6 +54,13 @@
'glossary.md', # this document will still be at the destination ToC
'guides/stable/_layer_events.md',
'guides/stable/_viewer_events.md',
+ 'plugins/stable/_npe2_contributions.md',
+ 'plugins/stable/_npe2_manifest.md',
+ 'plugins/stable/_npe2_readers_guide.md',
+ 'plugins/stable/_npe2_widgets_guide.md',
+ 'plugins/stable/_npe2_writers_guide.md',
+ 'plugins/stable/_npe2_sample_data_guide.md',
+ 'plugins/stable/_layer_data_guide.md',
]
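The napari patch above is a pure data change: the underscore-prefixed `plugins/stable/_*.md` sources are meant to be `{include}`-ed into other pages, so unless they appear in `TOC_IGNORE`, `update_toc()` treats them as new pages and appends them (extension-stripped) to the `plugins` chapter, which is the likely source of the missing and reordered entries. A small, runnable sketch of that prefix filtering, with made-up file names rather than napari's real ones:

```python
# Toy version of the prefix filtering that TOC_IGNORE drives in update_toc();
# the file names below are made up for illustration.
import os.path as osp

TOC_IGNORE = ["plugins/stable/_npe2_manifest.md", "images"]
DOC_EXTS = [".md", ".rst", ".ipynb"]

copied = [
    "plugins/stable/index.md",
    "plugins/stable/_npe2_manifest.md",  # include-only source, not a real page
    "images/logo.svg",
]

def toc_candidates(paths):
    keep = []
    for path in paths:
        if any(path.startswith(prefix) for prefix in TOC_IGNORE):
            continue  # ignored before it can be appended to a chapter
        base, ext = osp.splitext(path)
        if ext in DOC_EXTS:
            keep.append(base)  # the toc stores paths without extensions
    return keep

print(toc_candidates(copied))  # -> ['plugins/stable/index']
```

Only the ignore list changes in the fix; the files are still copied, so the `{include}` directives in the real pages keep resolving.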
| {"golden_diff": "diff --git a/docs/copy-docs.py b/docs/copy-docs.py\n--- a/docs/copy-docs.py\n+++ b/docs/copy-docs.py\n@@ -54,6 +54,13 @@\n 'glossary.md', # this document will still be at the destination ToC\n 'guides/stable/_layer_events.md',\n 'guides/stable/_viewer_events.md',\n+ 'plugins/stable/_npe2_contributions.md',\n+ 'plugins/stable/_npe2_manifest.md',\n+ 'plugins/stable/_npe2_readers_guide.md',\n+ 'plugins/stable/_npe2_widgets_guide.md',\n+ 'plugins/stable/_npe2_writers_guide.md',\n+ 'plugins/stable/_npe2_sample_data_guide.md',\n+ 'plugins/stable/_layer_data_guide.md',\n ]\n", "issue": "missing doc items (may be copy-docs problem)\n## \ud83d\udc1b Bug\r\n\r\nI was looking at napari.org after seeing #3916 get merged (a big update to plugin docs), and it looks like the toc is missing items. This seems to have something to do with the copy-docs script.\r\n\r\nIn particular, \r\n* it looks like files that are not in the toc are missing\r\n* some items in the toc go missing\r\n* the order of the items in the toc seems to change from the authored order\r\n\r\n@tlambert03 please feel free to add detail\r\n\n", "before_files": [{"content": "\"\"\"\nCopy docs from the napari repo into the napari.github.io repo\nand update the table of contents.\nBy default, will assume that there is a folder named napari.github.io\nin the same directory as the napari folder, if not a different copy\ndestination can be provided.\n\nRead ORGANIZATION.md to learn more about how the documentation sources\nare organized, and how everything comes together.\n\npython -m copy-docs [dstdir]\n\"\"\"\n\nimport copy\nimport os\nimport os.path as osp\nimport shutil\nimport sys\nfrom fnmatch import fnmatch\n\nimport yaml\n\n# path to copy and locations to copy to if different\nTO_COPY = [\n 'ORGANIZATION.md',\n 'glossary.md',\n 'developers',\n 'community',\n 'howtos',\n 'release',\n 'roadmaps',\n 'images',\n osp.join('_templates', 'autosummary'),\n *[\n (dire, osp.join(dire, 'stable'))\n for dire in ('api', 'guides', 'plugins')\n ],\n]\n\n# paths to ignore\nIGNORE = [\n osp.join('images', 'logo.png'),\n]\n\nSRC = osp.dirname(__file__)\n\nDOC_EXTS = ['.md', '.rst', '.ipynb']\n\nTOC_IGNORE = [\n 'api/stable',\n 'images',\n '_templates',\n 'ORGANIZATION.md',\n 'glossary.md', # this document will still be at the destination ToC\n 'guides/stable/_layer_events.md',\n 'guides/stable/_viewer_events.md',\n]\n\n\ndef exclude_filter(path):\n \"\"\"Exclude files in the ignore list and duplicated files.\"\"\"\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file\n\n\ndef copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):\n \"\"\"Copy a path from the source directory to the destination directory,\n with the given path relative to the directory roots.\n\n Parameters\n ----------\n srcdir : path-like\n Source directory root to copy from.\n dstdir : path-like\n Destination directory root to copy to.\n path : path-like\n Path relative to the `srcdir` of the path to copy from.\n newpath : path-like, optional\n Path relative to the `dstdir` of the path to copy to.\n If not provided, will default to the value of `path`.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of 
path-like\n Paths of the copied files.\n \"\"\"\n if newpath is None:\n newpath = path\n\n src = osp.join(srcdir, path)\n dst = osp.join(dstdir, newpath)\n\n if exclude(src): # skip this path\n return []\n\n print(f'copying {src} to {dst}')\n\n if osp.isfile(src):\n shutil.copyfile(src, dst)\n return [newpath]\n elif osp.isdir(src):\n if osp.exists(dst): # if the destination directory exists, delete it\n shutil.rmtree(dst)\n\n os.mkdir(dst)\n\n files = []\n\n for fpath in os.listdir(src): # recursively copy each child path\n p = osp.join(path, fpath)\n np = osp.join(newpath, fpath)\n files += copy_path(srcdir, dstdir, p, np, exclude=exclude)\n\n return files\n else:\n raise RuntimeError(f'unknown path type {src}')\n\n\ndef copy_paths(src, dst, paths, *, exclude=None):\n \"\"\"Copy files/directories given a list of their paths from\n the source directory to the destination directory.\n\n Parameters\n ----------\n src : path-like\n Source directory to copy from.\n dst : path-like\n Destination directory to copy to.\n paths : list of (path-like or 2-tuple of path-like)\n Paths of the files/directories to copy relative to the source directory.\n Pairs of paths in the list signify that the path to copy to is different\n than the path copied from.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of path-like\n Paths of the copied files.\n \"\"\"\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files\n\n\ndef update_toc(toc, paths, ignore=[]):\n \"\"\"Update the table of contents according to the paths of all files copied over.\n\n Parameters\n ----------\n toc : JSON\n Table of contents according to the JupyterBook specification.\n paths : list of path-like\n Paths of the files copied over.\n ignore : list of path-like\n List of directories to ignore when updating the table of contents.\n\n Returns\n -------\n new_toc : JSON\n Updated table of contents.\n \"\"\"\n new_toc = copy.deepcopy(toc)\n\n remaining_paths = []\n\n # remove all paths in ignore list and those with the wrong extension\n for path in paths:\n base, ext = osp.splitext(path)\n\n for prefix in ignore: # check if path should be ignored\n if path.startswith(prefix):\n break\n else: # not on the ignore list\n if ext in DOC_EXTS: # relevant filetype\n remaining_paths.append(\n base\n ) # the toc does not include extensions\n\n chapters = new_toc[1]['chapters']\n\n for chapter in chapters:\n if (\n 'file' not in chapter\n or (index := chapter['file']) not in remaining_paths\n ):\n continue # skip irrelevant chapters\n\n parent_dir = osp.dirname(index)\n remaining_paths.remove(index)\n\n sections = chapter['sections']\n files = [section['file'] for section in sections]\n\n # find and remove deleted files from toc\n j = 0\n for path in files:\n if path in remaining_paths:\n remaining_paths.remove(path)\n j += 1\n else:\n print(f'deleting {path} from toc')\n del sections[j] # delete from toc\n\n new_files = filter(\n lambda path: path.startswith(parent_dir), remaining_paths\n )\n for path in new_files:\n print(f'adding {path} to toc')\n sections.append({'file': path})\n remaining_paths.remove(path)\n\n return new_toc\n\n\ndef main(args):\n dst = osp.join(\n osp.dirname(osp.dirname(osp.dirname(__file__))), 'napari.github.io'\n )\n\n try:\n dst = args[1]\n except 
IndexError:\n pass\n\n files = copy_paths(SRC, dst, TO_COPY, exclude=exclude_filter)\n toc_file = osp.join(dst, '_toc.yml')\n\n with open(toc_file) as f:\n toc = yaml.safe_load(f)\n\n if toc is None:\n print(f'toc file {toc_file} empty')\n return\n\n new_toc = update_toc(toc, files, TOC_IGNORE)\n\n with open(toc_file, 'w') as f:\n yaml.dump(new_toc, f)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "path": "docs/copy-docs.py"}], "after_files": [{"content": "\"\"\"\nCopy docs from the napari repo into the napari.github.io repo\nand update the table of contents.\nBy default, will assume that there is a folder named napari.github.io\nin the same directory as the napari folder, if not a different copy\ndestination can be provided.\n\nRead ORGANIZATION.md to learn more about how the documentation sources\nare organized, and how everything comes together.\n\npython -m copy-docs [dstdir]\n\"\"\"\n\nimport copy\nimport os\nimport os.path as osp\nimport shutil\nimport sys\nfrom fnmatch import fnmatch\n\nimport yaml\n\n# path to copy and locations to copy to if different\nTO_COPY = [\n 'ORGANIZATION.md',\n 'glossary.md',\n 'developers',\n 'community',\n 'howtos',\n 'release',\n 'roadmaps',\n 'images',\n osp.join('_templates', 'autosummary'),\n *[\n (dire, osp.join(dire, 'stable'))\n for dire in ('api', 'guides', 'plugins')\n ],\n]\n\n# paths to ignore\nIGNORE = [\n osp.join('images', 'logo.png'),\n]\n\nSRC = osp.dirname(__file__)\n\nDOC_EXTS = ['.md', '.rst', '.ipynb']\n\nTOC_IGNORE = [\n 'api/stable',\n 'images',\n '_templates',\n 'ORGANIZATION.md',\n 'glossary.md', # this document will still be at the destination ToC\n 'guides/stable/_layer_events.md',\n 'guides/stable/_viewer_events.md',\n 'plugins/stable/_npe2_contributions.md',\n 'plugins/stable/_npe2_manifest.md',\n 'plugins/stable/_npe2_readers_guide.md',\n 'plugins/stable/_npe2_widgets_guide.md',\n 'plugins/stable/_npe2_writers_guide.md',\n 'plugins/stable/_npe2_sample_data_guide.md',\n 'plugins/stable/_layer_data_guide.md',\n]\n\n\ndef exclude_filter(path):\n \"\"\"Exclude files in the ignore list and duplicated files.\"\"\"\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file\n\n\ndef copy_path(srcdir, dstdir, path, newpath=None, *, exclude=None):\n \"\"\"Copy a path from the source directory to the destination directory,\n with the given path relative to the directory roots.\n\n Parameters\n ----------\n srcdir : path-like\n Source directory root to copy from.\n dstdir : path-like\n Destination directory root to copy to.\n path : path-like\n Path relative to the `srcdir` of the path to copy from.\n newpath : path-like, optional\n Path relative to the `dstdir` of the path to copy to.\n If not provided, will default to the value of `path`.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of path-like\n Paths of the copied files.\n \"\"\"\n if newpath is None:\n newpath = path\n\n src = osp.join(srcdir, path)\n dst = osp.join(dstdir, newpath)\n\n if exclude(src): # skip this path\n return []\n\n print(f'copying {src} to {dst}')\n\n if osp.isfile(src):\n shutil.copyfile(src, dst)\n return [newpath]\n elif osp.isdir(src):\n if osp.exists(dst): # if the destination directory exists, delete it\n 
shutil.rmtree(dst)\n\n os.mkdir(dst)\n\n files = []\n\n for fpath in os.listdir(src): # recursively copy each child path\n p = osp.join(path, fpath)\n np = osp.join(newpath, fpath)\n files += copy_path(srcdir, dstdir, p, np, exclude=exclude)\n\n return files\n else:\n raise RuntimeError(f'unknown path type {src}')\n\n\ndef copy_paths(src, dst, paths, *, exclude=None):\n \"\"\"Copy files/directories given a list of their paths from\n the source directory to the destination directory.\n\n Parameters\n ----------\n src : path-like\n Source directory to copy from.\n dst : path-like\n Destination directory to copy to.\n paths : list of (path-like or 2-tuple of path-like)\n Paths of the files/directories to copy relative to the source directory.\n Pairs of paths in the list signify that the path to copy to is different\n than the path copied from.\n exclude : function(path-like) -> bool, keyword-only, optional\n Conditional function on whether to exclude the given path.\n\n Returns\n -------\n files : list of path-like\n Paths of the copied files.\n \"\"\"\n files = []\n\n for path in paths:\n if isinstance(path, tuple):\n files += copy_path(src, dst, path[0], path[1], exclude=exclude)\n else:\n files += copy_path(src, dst, path, exclude=exclude)\n\n return files\n\n\ndef update_toc(toc, paths, ignore=[]):\n \"\"\"Update the table of contents according to the paths of all files copied over.\n\n Parameters\n ----------\n toc : JSON\n Table of contents according to the JupyterBook specification.\n paths : list of path-like\n Paths of the files copied over.\n ignore : list of path-like\n List of directories to ignore when updating the table of contents.\n\n Returns\n -------\n new_toc : JSON\n Updated table of contents.\n \"\"\"\n new_toc = copy.deepcopy(toc)\n\n remaining_paths = []\n\n # remove all paths in ignore list and those with the wrong extension\n for path in paths:\n base, ext = osp.splitext(path)\n\n for prefix in ignore: # check if path should be ignored\n if path.startswith(prefix):\n break\n else: # not on the ignore list\n if ext in DOC_EXTS: # relevant filetype\n remaining_paths.append(\n base\n ) # the toc does not include extensions\n\n chapters = new_toc[1]['chapters']\n\n for chapter in chapters:\n if (\n 'file' not in chapter\n or (index := chapter['file']) not in remaining_paths\n ):\n continue # skip irrelevant chapters\n\n parent_dir = osp.dirname(index)\n remaining_paths.remove(index)\n\n sections = chapter['sections']\n files = [section['file'] for section in sections]\n\n # find and remove deleted files from toc\n j = 0\n for path in files:\n if path in remaining_paths:\n remaining_paths.remove(path)\n j += 1\n else:\n print(f'deleting {path} from toc')\n del sections[j] # delete from toc\n\n new_files = filter(\n lambda path: path.startswith(parent_dir), remaining_paths\n )\n for path in new_files:\n print(f'adding {path} to toc')\n sections.append({'file': path})\n remaining_paths.remove(path)\n\n return new_toc\n\n\ndef main(args):\n dst = osp.join(\n osp.dirname(osp.dirname(osp.dirname(__file__))), 'napari.github.io'\n )\n\n try:\n dst = args[1]\n except IndexError:\n pass\n\n files = copy_paths(SRC, dst, TO_COPY, exclude=exclude_filter)\n toc_file = osp.join(dst, '_toc.yml')\n\n with open(toc_file) as f:\n toc = yaml.safe_load(f)\n\n if toc is None:\n print(f'toc file {toc_file} empty')\n return\n\n new_toc = update_toc(toc, files, TOC_IGNORE)\n\n with open(toc_file, 'w') as f:\n yaml.dump(new_toc, f)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "path": 
"docs/copy-docs.py"}]} | 2,758 | 190 |
gh_patches_debug_6777 | rasdani/github-patches | git_diff | nautobot__nautobot-604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OPTIONS returns all Status choices instead of ones only related to the specific model
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.8
* Nautobot version: 1.0.1
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
Cables should only have three status choices and virtual machines should have six. The exact numbers are arbitrary, but this is with a fresh install of Nautobot.
```python
>>> nb.dcim.cables.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]
>>> nb.virtualization.virtual_machines.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]
```
I haven't been able to dig into it too much, but I believe this is the culprit.
https://github.com/nautobot/nautobot/blob/develop/nautobot/extras/api/serializers.py#L621
<!-- What did you expect to happen? -->
### Expected Behavior
```python
>>> nb.dcim.cables.choices()['status']
[{'value': 'connected', 'display': 'Connected'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'planned', 'display': 'Planned'}]
>>> nb.virtualization.virtual_machines.choices()['status']
[{'value': 'active', 'display': 'Active'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'staged', 'display': 'Staged'}]
```
<!-- What happened instead? -->
### Observed Behavior
See **Steps to Reproduce**.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/extras/api/fields.py`
Content:
```
1 from collections import OrderedDict
2
3 from rest_framework import serializers
4
5
6 class MultipleChoiceJSONField(serializers.MultipleChoiceField):
7 """A MultipleChoiceField that renders the received value as a JSON-compatible list rather than a set."""
8
9 def to_internal_value(self, data):
10 set_value = super().to_internal_value(data)
11 return sorted(set_value)
12
13
14 class StatusSerializerField(serializers.SlugRelatedField):
15 """Serializer field for `Status` object fields."""
16
17 show_choices = True
18
19 def __init__(self, **kwargs):
20 kwargs.setdefault("slug_field", "slug")
21 super().__init__(**kwargs)
22
23 def to_representation(self, obj):
24 """Make this field compatible w/ the existing API for `ChoiceField`."""
25 if obj == "":
26 return None
27
28 return OrderedDict(
29 [
30 ("value", obj.slug),
31 ("label", str(obj)),
32 ]
33 )
34
35 def to_internal_value(self, data):
36 """Always lower-case the custom choice value."""
37 if hasattr(data, "lower"):
38 data = data.lower()
39 return super().to_internal_value(data)
40
41 def get_choices(self, cutoff=None):
42 """
43 Return a nested list of dicts for enum choices.
44
45 This had to be overloaded since the base method calls
46 `to_representation()` which in our case is an OrderedDict and can't be
47 nested.
48 """
49 queryset = self.get_queryset()
50 if queryset is None:
51 # Ensure that field.choices returns something sensible
52 # even when accessed with a read-only field.
53 return {}
54
55 if cutoff is not None:
56 queryset = queryset[:cutoff]
57
58 return OrderedDict([(item.slug, self.display_value(item)) for item in queryset])
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/extras/api/fields.py b/nautobot/extras/api/fields.py
--- a/nautobot/extras/api/fields.py
+++ b/nautobot/extras/api/fields.py
@@ -38,6 +38,12 @@
data = data.lower()
return super().to_internal_value(data)
+ def get_queryset(self):
+ """Only emit status options for this model/field combination."""
+ queryset = super().get_queryset()
+ model = self.parent.Meta.model
+ return queryset.get_for_model(model)
+
def get_choices(self, cutoff=None):
"""
Return a nested list of dicts for enum choices.
| {"golden_diff": "diff --git a/nautobot/extras/api/fields.py b/nautobot/extras/api/fields.py\n--- a/nautobot/extras/api/fields.py\n+++ b/nautobot/extras/api/fields.py\n@@ -38,6 +38,12 @@\n data = data.lower()\n return super().to_internal_value(data)\n \n+ def get_queryset(self):\n+ \"\"\"Only emit status options for this model/field combination.\"\"\"\n+ queryset = super().get_queryset()\n+ model = self.parent.Meta.model\n+ return queryset.get_for_model(model)\n+\n def get_choices(self, cutoff=None):\n \"\"\"\n Return a nested list of dicts for enum choices.\n", "issue": "OPTIONS returns all Status choices instead of ones only related to the specific model\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.8\r\n* Nautobot version: 1.0.1\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\nCables should only have three status choices and virtual machines should have 6. 
Numbers are arbitrary, but this is with a fresh install of Nautobot.\r\n\r\n```python\r\n>>> nb.dcim.cables.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]\r\n>>> nb.virtualization.virtual_machines.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'available', 'display': 'Available'}, {'value': 'connected', 'display': 'Connected'}, {'value': 'container', 'display': 'Container'}, {'value': 'decommissioned', 'display': 'Decommissioned'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'deprecated', 'display': 'Deprecated'}, {'value': 'deprovisioning', 'display': 'Deprovisioning'}, {'value': 'dhcp', 'display': 'DHCP'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'inventory', 'display': 'Inventory'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'provisioning', 'display': 'Provisioning'}, {'value': 'reserved', 'display': 'Reserved'}, {'value': 'retired', 'display': 'Retired'}, {'value': 'slaac', 'display': 'SLAAC'}, {'value': 'staged', 'display': 'Staged'}, {'value': 'staging', 'display': 'Staging'}]\r\n```\r\n\r\nI haven't been able to dig into it too much, but I believe this is the culprit.\r\n\r\nhttps://github.com/nautobot/nautobot/blob/develop/nautobot/extras/api/serializers.py#L621\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\n```python\r\n>>> nb.dcim.cables.choices()['status']\r\n[{'value': 'connected', 'display': 'Connected'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'planned', 'display': 'Planned'}]\r\n>>> nb.virtualization.virtual_machines.choices()['status']\r\n[{'value': 'active', 'display': 'Active'}, {'value': 'decommissioning', 'display': 'Decommissioning'}, {'value': 'failed', 'display': 'Failed'}, {'value': 'offline', 'display': 'Offline'}, {'value': 'planned', 'display': 'Planned'}, {'value': 'staged', 'display': 'Staged'}]\r\n```\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nSee **Steps to Reproduce**.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework import serializers\n\n\nclass MultipleChoiceJSONField(serializers.MultipleChoiceField):\n \"\"\"A MultipleChoiceField that renders the received value as a JSON-compatible list rather than a set.\"\"\"\n\n def to_internal_value(self, data):\n set_value = super().to_internal_value(data)\n return sorted(set_value)\n\n\nclass StatusSerializerField(serializers.SlugRelatedField):\n \"\"\"Serializer field for `Status` object fields.\"\"\"\n\n show_choices = True\n\n def __init__(self, **kwargs):\n kwargs.setdefault(\"slug_field\", \"slug\")\n super().__init__(**kwargs)\n\n def to_representation(self, obj):\n \"\"\"Make this field compatible w/ the existing API for `ChoiceField`.\"\"\"\n if obj == \"\":\n return None\n\n return OrderedDict(\n [\n (\"value\", obj.slug),\n (\"label\", str(obj)),\n ]\n )\n\n def to_internal_value(self, data):\n \"\"\"Always lower-case the custom choice value.\"\"\"\n if hasattr(data, \"lower\"):\n data = data.lower()\n return super().to_internal_value(data)\n\n def get_choices(self, cutoff=None):\n \"\"\"\n Return a nested list of dicts for enum choices.\n\n This had to be overloaded since the base method calls\n `to_representation()` which in our case is an OrderedDict and can't be\n nested.\n \"\"\"\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict([(item.slug, self.display_value(item)) for item in queryset])\n", "path": "nautobot/extras/api/fields.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework import serializers\n\n\nclass MultipleChoiceJSONField(serializers.MultipleChoiceField):\n \"\"\"A MultipleChoiceField that renders the received value as a JSON-compatible list rather than a set.\"\"\"\n\n def to_internal_value(self, data):\n set_value = super().to_internal_value(data)\n return sorted(set_value)\n\n\nclass StatusSerializerField(serializers.SlugRelatedField):\n \"\"\"Serializer field for `Status` object fields.\"\"\"\n\n show_choices = True\n\n def __init__(self, **kwargs):\n kwargs.setdefault(\"slug_field\", \"slug\")\n super().__init__(**kwargs)\n\n def to_representation(self, obj):\n \"\"\"Make this field compatible w/ the existing API for `ChoiceField`.\"\"\"\n if obj == \"\":\n return None\n\n return OrderedDict(\n [\n (\"value\", obj.slug),\n (\"label\", str(obj)),\n ]\n )\n\n def to_internal_value(self, data):\n \"\"\"Always lower-case the custom choice value.\"\"\"\n if hasattr(data, \"lower\"):\n data = data.lower()\n return super().to_internal_value(data)\n\n def get_queryset(self):\n \"\"\"Only emit status options for this model/field combination.\"\"\"\n queryset = super().get_queryset()\n model = self.parent.Meta.model\n return queryset.get_for_model(model)\n\n def get_choices(self, cutoff=None):\n \"\"\"\n Return a nested list of dicts for enum choices.\n\n This had to be overloaded since the base method calls\n `to_representation()` which in our case is an OrderedDict and can't be\n nested.\n \"\"\"\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return 
OrderedDict([(item.slug, self.display_value(item)) for item in queryset])\n", "path": "nautobot/extras/api/fields.py"}]} | 1,809 | 148 |
gh_patches_debug_19871 | rasdani/github-patches | git_diff | ManimCommunity__manim-1516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove references to t_min and t_max in ParametricFunction
## Description of bug / unexpected behavior
With PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/mobject/functions.py`
Content:
```
1 """Mobjects representing function graphs."""
2
3 __all__ = ["ParametricFunction", "FunctionGraph"]
4
5
6 import numpy as np
7
8 from .. import config
9 from ..constants import *
10 from ..mobject.types.vectorized_mobject import VMobject
11 from ..utils.color import YELLOW
12
13
14 class ParametricFunction(VMobject):
15 """A parametric curve.
16
17 Examples
18 --------
19
20 .. manim:: PlotParametricFunction
21 :save_last_frame:
22
23 class PlotParametricFunction(Scene):
24 def func(self, t):
25 return np.array((np.sin(2 * t), np.sin(3 * t), 0))
26
27 def construct(self):
28 func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
29 self.add(func.scale(3))
30
31 .. manim:: ThreeDParametricSpring
32 :save_last_frame:
33
34 class ThreeDParametricSpring(ThreeDScene):
35 def construct(self):
36 curve1 = ParametricFunction(
37 lambda u: np.array([
38 1.2 * np.cos(u),
39 1.2 * np.sin(u),
40 u * 0.05
41 ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
42 ).set_shade_in_3d(True)
43 axes = ThreeDAxes()
44 self.add(axes, curve1)
45 self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)
46 self.wait()
47 """
48
49 def __init__(
50 self,
51 function=None,
52 t_range=None,
53 dt=1e-8,
54 discontinuities=None,
55 use_smoothing=True,
56 **kwargs
57 ):
58 self.function = function
59 t_range = np.array([0, 1, 0.01]) if t_range is None else t_range
60 if len(t_range) == 2:
61 t_range = [*t_range, 0.01]
62
63 self.dt = dt
64 self.discontinuities = [] if discontinuities is None else discontinuities
65 self.use_smoothing = use_smoothing
66 self.t_min, self.t_max, self.t_step = t_range
67
68 VMobject.__init__(self, **kwargs)
69
70 def get_function(self):
71 return self.function
72
73 def get_point_from_function(self, t):
74 return self.function(t)
75
76 def generate_points(self):
77
78 discontinuities = filter(
79 lambda t: self.t_min <= t <= self.t_max, self.discontinuities
80 )
81 discontinuities = np.array(list(discontinuities))
82 boundary_times = [
83 self.t_min,
84 self.t_max,
85 *(discontinuities - self.dt),
86 *(discontinuities + self.dt),
87 ]
88 boundary_times.sort()
89 for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):
90 t_range = [*np.arange(t1, t2, self.t_step), t2]
91 points = np.array([self.function(t) for t in t_range])
92 self.start_new_path(points[0])
93 self.add_points_as_corners(points[1:])
94 if self.use_smoothing:
95 # TODO: not in line with upstream, approx_smooth does not exist
96 self.make_smooth()
97 return self
98
99
100 class FunctionGraph(ParametricFunction):
101 def __init__(self, function, x_range=None, color=YELLOW, **kwargs):
102
103 if x_range is None:
104 x_range = np.array([-config["frame_x_radius"], config["frame_x_radius"]])
105
106 self.x_range = x_range
107 self.parametric_function = lambda t: np.array([t, function(t), 0])
108 self.function = function
109 super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)
110
111 def get_function(self):
112 return self.function
113
114 def get_point_from_function(self, x):
115 return self.parametric_function(x)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py
--- a/manim/mobject/functions.py
+++ b/manim/mobject/functions.py
@@ -25,7 +25,7 @@
return np.array((np.sin(2 * t), np.sin(3 * t), 0))
def construct(self):
- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)
+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)
self.add(func.scale(3))
.. manim:: ThreeDParametricSpring
@@ -38,7 +38,7 @@
1.2 * np.cos(u),
1.2 * np.sin(u),
u * 0.05
- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,
+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])
).set_shade_in_3d(True)
axes = ThreeDAxes()
self.add(axes, curve1)
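
For readers following this fix, a minimal sketch of the replacement API (assuming a Manim Community version where `ParametricFunction` accepts `t_range` instead of `t_min`/`t_max`):

```python
import numpy as np
from manim import ParametricFunction, RED, TAU

# t_range bundles the old t_min/t_max (plus an optional step) into one array
curve = ParametricFunction(
    lambda t: np.array([np.sin(2 * t), np.sin(3 * t), 0]),
    t_range=np.array([0, TAU]),  # was: t_min=0, t_max=TAU
    color=RED,
)
```
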
| {"golden_diff": "diff --git a/manim/mobject/functions.py b/manim/mobject/functions.py\n--- a/manim/mobject/functions.py\n+++ b/manim/mobject/functions.py\n@@ -25,7 +25,7 @@\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n \n def construct(self):\n- func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n+ func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n \n .. manim:: ThreeDParametricSpring\n@@ -38,7 +38,7 @@\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n- ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n+ ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n", "issue": "Remove references to t_min and t_max in ParametricFunction\n## Description of bug / unexpected behavior\r\n\r\nWith PR #1161 the old way of setting the parameter's range (e.g. by setting `t_min` and `t_max`) was replaced by a single parameter `t_range`. However, the docs still show usage of `t_min` and `t_max`.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Mobjects representing function graphs.\"\"\"\n\n__all__ = [\"ParametricFunction\", \"FunctionGraph\"]\n\n\nimport numpy as np\n\nfrom .. import config\nfrom ..constants import *\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..utils.color import YELLOW\n\n\nclass ParametricFunction(VMobject):\n \"\"\"A parametric curve.\n\n Examples\n --------\n\n .. manim:: PlotParametricFunction\n :save_last_frame:\n\n class PlotParametricFunction(Scene):\n def func(self, t):\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n\n def construct(self):\n func = ParametricFunction(self.func, t_max = TAU, fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n\n .. 
manim:: ThreeDParametricSpring\n :save_last_frame:\n\n class ThreeDParametricSpring(ThreeDScene):\n def construct(self):\n curve1 = ParametricFunction(\n lambda u: np.array([\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n ]), color=RED, t_min=-3 * TAU, t_max=5 * TAU,\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)\n self.wait()\n \"\"\"\n\n def __init__(\n self,\n function=None,\n t_range=None,\n dt=1e-8,\n discontinuities=None,\n use_smoothing=True,\n **kwargs\n ):\n self.function = function\n t_range = np.array([0, 1, 0.01]) if t_range is None else t_range\n if len(t_range) == 2:\n t_range = [*t_range, 0.01]\n\n self.dt = dt\n self.discontinuities = [] if discontinuities is None else discontinuities\n self.use_smoothing = use_smoothing\n self.t_min, self.t_max, self.t_step = t_range\n\n VMobject.__init__(self, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, t):\n return self.function(t)\n\n def generate_points(self):\n\n discontinuities = filter(\n lambda t: self.t_min <= t <= self.t_max, self.discontinuities\n )\n discontinuities = np.array(list(discontinuities))\n boundary_times = [\n self.t_min,\n self.t_max,\n *(discontinuities - self.dt),\n *(discontinuities + self.dt),\n ]\n boundary_times.sort()\n for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):\n t_range = [*np.arange(t1, t2, self.t_step), t2]\n points = np.array([self.function(t) for t in t_range])\n self.start_new_path(points[0])\n self.add_points_as_corners(points[1:])\n if self.use_smoothing:\n # TODO: not in line with upstream, approx_smooth does not exist\n self.make_smooth()\n return self\n\n\nclass FunctionGraph(ParametricFunction):\n def __init__(self, function, x_range=None, color=YELLOW, **kwargs):\n\n if x_range is None:\n x_range = np.array([-config[\"frame_x_radius\"], config[\"frame_x_radius\"]])\n\n self.x_range = x_range\n self.parametric_function = lambda t: np.array([t, function(t), 0])\n self.function = function\n super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, x):\n return self.parametric_function(x)\n", "path": "manim/mobject/functions.py"}], "after_files": [{"content": "\"\"\"Mobjects representing function graphs.\"\"\"\n\n__all__ = [\"ParametricFunction\", \"FunctionGraph\"]\n\n\nimport numpy as np\n\nfrom .. import config\nfrom ..constants import *\nfrom ..mobject.types.vectorized_mobject import VMobject\nfrom ..utils.color import YELLOW\n\n\nclass ParametricFunction(VMobject):\n \"\"\"A parametric curve.\n\n Examples\n --------\n\n .. manim:: PlotParametricFunction\n :save_last_frame:\n\n class PlotParametricFunction(Scene):\n def func(self, t):\n return np.array((np.sin(2 * t), np.sin(3 * t), 0))\n\n def construct(self):\n func = ParametricFunction(self.func, t_range = np.array([0, TAU]), fill_opacity=0).set_color(RED)\n self.add(func.scale(3))\n\n .. 
manim:: ThreeDParametricSpring\n :save_last_frame:\n\n class ThreeDParametricSpring(ThreeDScene):\n def construct(self):\n curve1 = ParametricFunction(\n lambda u: np.array([\n 1.2 * np.cos(u),\n 1.2 * np.sin(u),\n u * 0.05\n ]), color=RED, t_range = np.array([-3*TAU, 5*TAU, 0.01])\n ).set_shade_in_3d(True)\n axes = ThreeDAxes()\n self.add(axes, curve1)\n self.set_camera_orientation(phi=80 * DEGREES, theta=-60 * DEGREES)\n self.wait()\n \"\"\"\n\n def __init__(\n self,\n function=None,\n t_range=None,\n dt=1e-8,\n discontinuities=None,\n use_smoothing=True,\n **kwargs\n ):\n self.function = function\n t_range = np.array([0, 1, 0.01]) if t_range is None else t_range\n if len(t_range) == 2:\n t_range = [*t_range, 0.01]\n\n self.dt = dt\n self.discontinuities = [] if discontinuities is None else discontinuities\n self.use_smoothing = use_smoothing\n self.t_min, self.t_max, self.t_step = t_range\n\n VMobject.__init__(self, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, t):\n return self.function(t)\n\n def generate_points(self):\n\n discontinuities = filter(\n lambda t: self.t_min <= t <= self.t_max, self.discontinuities\n )\n discontinuities = np.array(list(discontinuities))\n boundary_times = [\n self.t_min,\n self.t_max,\n *(discontinuities - self.dt),\n *(discontinuities + self.dt),\n ]\n boundary_times.sort()\n for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):\n t_range = [*np.arange(t1, t2, self.t_step), t2]\n points = np.array([self.function(t) for t in t_range])\n self.start_new_path(points[0])\n self.add_points_as_corners(points[1:])\n if self.use_smoothing:\n # TODO: not in line with upstream, approx_smooth does not exist\n self.make_smooth()\n return self\n\n\nclass FunctionGraph(ParametricFunction):\n def __init__(self, function, x_range=None, color=YELLOW, **kwargs):\n\n if x_range is None:\n x_range = np.array([-config[\"frame_x_radius\"], config[\"frame_x_radius\"]])\n\n self.x_range = x_range\n self.parametric_function = lambda t: np.array([t, function(t), 0])\n self.function = function\n super().__init__(self.parametric_function, self.x_range, color=color, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, x):\n return self.parametric_function(x)\n", "path": "manim/mobject/functions.py"}]} | 1,458 | 276 |
gh_patches_debug_31390 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-5388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Mod] Mention spam accounting for roles
# Feature request
After finalizing https://github.com/Cog-Creators/Red-DiscordBot/pull/4359 (allowing for duplicate mentions), a small discussion was brought up (I forgot with whom) about possibly making something that could account for role mentions as well. 
#### Select the type of feature you are requesting:
<!-- To check a box, replace the space between the [] with a x -->
- [ ] Cog
- [x] Command
- [ ] API functionality
#### Describe your requested feature
This could either be its own count system (separate from total user mentions, so user mentions could be set to 7 while role mentions could be set to 3), or it could take both user mentions and role mentions into the same count (each would account for 7 in this example). 
I wanted to open this issue to gather feedback. I am more than willing to work on this myself, though I want to make 100% sure that this is something worth working on, and to get proper feedback on how to set this up and how it should behave.
The reason I believe this should be something for Core Mod is that, while it can 100% be done as a third-party cog, building it in would give us and any user of core mod better control over the anti-mention-spam protection we offer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/mod/events.py`
Content:
```
1 import logging
2 from datetime import timezone
3 from collections import defaultdict, deque
4
5 import discord
6 from redbot.core import i18n, modlog, commands
7 from redbot.core.utils.mod import is_mod_or_superior
8 from .abc import MixinMeta
9
10 _ = i18n.Translator("Mod", __file__)
11 log = logging.getLogger("red.mod")
12
13
14 class Events(MixinMeta):
15 """
16 This is a mixin for the core mod cog
17 Has a bunch of things split off to here.
18 """
19
20 async def check_duplicates(self, message):
21 guild = message.guild
22 author = message.author
23
24 guild_cache = self.cache.get(guild.id, None)
25 if guild_cache is None:
26 repeats = await self.config.guild(guild).delete_repeats()
27 if repeats == -1:
28 return False
29 guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))
30
31 if not message.content:
32 return False
33
34 guild_cache[author].append(message.content)
35 msgs = guild_cache[author]
36 if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:
37 try:
38 await message.delete()
39 return True
40 except discord.HTTPException:
41 pass
42 return False
43
44 async def check_mention_spam(self, message):
45 guild, author = message.guild, message.author
46 mention_spam = await self.config.guild(guild).mention_spam.all()
47
48 if mention_spam["strict"]: # if strict is enabled
49 mentions = message.raw_mentions
50 else: # if not enabled
51 mentions = set(message.mentions)
52
53 if mention_spam["ban"]:
54 if len(mentions) >= mention_spam["ban"]:
55 try:
56 await guild.ban(author, reason=_("Mention spam (Autoban)"))
57 except discord.HTTPException:
58 log.warning(
59 "Failed to ban a member ({member}) for mention spam in server {guild}.".format(
60 member=author.id, guild=guild.id
61 )
62 )
63 else:
64 await modlog.create_case(
65 self.bot,
66 guild,
67 message.created_at,
68 "ban",
69 author,
70 guild.me,
71 _("Mention spam (Autoban)"),
72 until=None,
73 channel=None,
74 )
75 return True
76
77 if mention_spam["kick"]:
78 if len(mentions) >= mention_spam["kick"]:
79 try:
80 await guild.kick(author, reason=_("Mention Spam (Autokick)"))
81 except discord.HTTPException:
82 log.warning(
83 "Failed to kick a member ({member}) for mention spam in server {guild}".format(
84 member=author.id, guild=guild.id
85 )
86 )
87 else:
88 await modlog.create_case(
89 self.bot,
90 guild,
91 message.created_at,
92 "kick",
93 author,
94 guild.me,
95 _("Mention spam (Autokick)"),
96 until=None,
97 channel=None,
98 )
99 return True
100
101 if mention_spam["warn"]:
102 if len(mentions) >= mention_spam["warn"]:
103 try:
104 await author.send(_("Please do not mass mention people!"))
105 except (discord.HTTPException, discord.Forbidden):
106 try:
107 await message.channel.send(
108 _("{member}, Please do not mass mention people!").format(
109 member=author.mention
110 )
111 )
112 except (discord.HTTPException, discord.Forbidden):
113 log.warning(
114 "Failed to warn a member ({member}) for mention spam in server {guild}".format(
115 member=author.id, guild=guild.id
116 )
117 )
118 return False
119
120 await modlog.create_case(
121 self.bot,
122 guild,
123 message.created_at,
124 "warning",
125 author,
126 guild.me,
127 _("Mention spam (Autowarn)"),
128 until=None,
129 channel=None,
130 )
131 return True
132 return False
133
134 @commands.Cog.listener()
135 async def on_message(self, message):
136 author = message.author
137 if message.guild is None or self.bot.user == author:
138 return
139
140 if await self.bot.cog_disabled_in_guild(self, message.guild):
141 return
142
143 valid_user = isinstance(author, discord.Member) and not author.bot
144 if not valid_user:
145 return
146
147 # Bots and mods or superior are ignored from the filter
148 mod_or_superior = await is_mod_or_superior(self.bot, obj=author)
149 if mod_or_superior:
150 return
151 # As are anyone configured to be
152 if await self.bot.is_automod_immune(message):
153 return
154
155 await i18n.set_contextual_locales_from_guild(self.bot, message.guild)
156
157 deleted = await self.check_duplicates(message)
158 if not deleted:
159 await self.check_mention_spam(message)
160
161 @commands.Cog.listener()
162 async def on_user_update(self, before: discord.User, after: discord.User):
163 if before.name != after.name:
164 track_all_names = await self.config.track_all_names()
165 if not track_all_names:
166 return
167 async with self.config.user(before).past_names() as name_list:
168 while None in name_list: # clean out null entries from a bug
169 name_list.remove(None)
170 if before.name in name_list:
171 # Ensure order is maintained without duplicates occurring
172 name_list.remove(before.name)
173 name_list.append(before.name)
174 while len(name_list) > 20:
175 name_list.pop(0)
176
177 @commands.Cog.listener()
178 async def on_member_update(self, before: discord.Member, after: discord.Member):
179 if before.nick != after.nick and before.nick is not None:
180 guild = after.guild
181 if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):
182 return
183 track_all_names = await self.config.track_all_names()
184 track_nicknames = await self.config.guild(guild).track_nicknames()
185 if (not track_all_names) or (not track_nicknames):
186 return
187 async with self.config.member(before).past_nicks() as nick_list:
188 while None in nick_list: # clean out null entries from a bug
189 nick_list.remove(None)
190 if before.nick in nick_list:
191 nick_list.remove(before.nick)
192 nick_list.append(before.nick)
193 while len(nick_list) > 20:
194 nick_list.pop(0)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py
--- a/redbot/cogs/mod/events.py
+++ b/redbot/cogs/mod/events.py
@@ -46,12 +46,12 @@
mention_spam = await self.config.guild(guild).mention_spam.all()
if mention_spam["strict"]: # if strict is enabled
- mentions = message.raw_mentions
+ mentions = len(message.raw_mentions) + len(message.raw_role_mentions)
else: # if not enabled
- mentions = set(message.mentions)
+ mentions = len(set(message.mentions)) + len(set(message.role_mentions))
if mention_spam["ban"]:
- if len(mentions) >= mention_spam["ban"]:
+ if mentions >= mention_spam["ban"]:
try:
await guild.ban(author, reason=_("Mention spam (Autoban)"))
except discord.HTTPException:
@@ -75,7 +75,7 @@
return True
if mention_spam["kick"]:
- if len(mentions) >= mention_spam["kick"]:
+ if mentions >= mention_spam["kick"]:
try:
await guild.kick(author, reason=_("Mention Spam (Autokick)"))
except discord.HTTPException:
@@ -99,7 +99,7 @@
return True
if mention_spam["warn"]:
- if len(mentions) >= mention_spam["warn"]:
+ if mentions >= mention_spam["warn"]:
try:
await author.send(_("Please do not mass mention people!"))
except (discord.HTTPException, discord.Forbidden):
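
A minimal sketch of the combined count this patch introduces, with `strict` standing in for `mention_spam["strict"]` (the `raw_*` and non-raw attributes are standard `discord.Message` properties):

```python
# Count user and role mentions together before comparing against the thresholds
if strict:  # duplicates count, so use the raw ID lists
    mentions = len(message.raw_mentions) + len(message.raw_role_mentions)
else:       # duplicates collapse via set()
    mentions = len(set(message.mentions)) + len(set(message.role_mentions))

if mentions >= mention_spam["warn"]:
    ...  # warn / kick / ban as configured
```
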
| {"golden_diff": "diff --git a/redbot/cogs/mod/events.py b/redbot/cogs/mod/events.py\n--- a/redbot/cogs/mod/events.py\n+++ b/redbot/cogs/mod/events.py\n@@ -46,12 +46,12 @@\n mention_spam = await self.config.guild(guild).mention_spam.all()\n \n if mention_spam[\"strict\"]: # if strict is enabled\n- mentions = message.raw_mentions\n+ mentions = len(message.raw_mentions) + len(message.raw_role_mentions)\n else: # if not enabled\n- mentions = set(message.mentions)\n+ mentions = len(set(message.mentions)) + len(set(message.role_mentions))\n \n if mention_spam[\"ban\"]:\n- if len(mentions) >= mention_spam[\"ban\"]:\n+ if mentions >= mention_spam[\"ban\"]:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n@@ -75,7 +75,7 @@\n return True\n \n if mention_spam[\"kick\"]:\n- if len(mentions) >= mention_spam[\"kick\"]:\n+ if mentions >= mention_spam[\"kick\"]:\n try:\n await guild.kick(author, reason=_(\"Mention Spam (Autokick)\"))\n except discord.HTTPException:\n@@ -99,7 +99,7 @@\n return True\n \n if mention_spam[\"warn\"]:\n- if len(mentions) >= mention_spam[\"warn\"]:\n+ if mentions >= mention_spam[\"warn\"]:\n try:\n await author.send(_(\"Please do not mass mention people!\"))\n except (discord.HTTPException, discord.Forbidden):\n", "issue": "[Mod] Mention spam accounting for roles\n# Feature request\r\n\r\nAfter finalizing https://github.com/Cog-Creators/Red-DiscordBot/pull/4359 (allowing for duplicate mentions). A small discussion was brought up (I forgot with whom), to possibly make something that would / could account for role mentions as well. \r\n\r\n#### Select the type of feature you are requesting:\r\n\r\n<!-- To check a box, replace the space between the [] with a x -->\r\n\r\n- [ ] Cog\r\n- [x] Command\r\n- [ ] API functionality\r\n\r\n#### Describe your requested feature\r\n\r\nThis could either be it's own count system (separate from total user mentions. So user mentions could be set to 7, while role mentions could be set to 3), or it would take both user mentions and role mentions into the same context. (each would account for 7 in this example). 
\r\n\r\nI wanted to make this issue to gather feedback, I am more than willing to work on this myself, though, I want to make 100% sure that this is something worth working on, getting proper feedback on how to set this up and how it should action.\r\n\r\nThe reason why I believe this should be something for Core Mod is because while it can 100% be used as a third party cog, I figured this would give us and any user who uses core mod better advantages and control over antimentionspam we offer.\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import timezone\nfrom collections import defaultdict, deque\n\nimport discord\nfrom redbot.core import i18n, modlog, commands\nfrom redbot.core.utils.mod import is_mod_or_superior\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\nlog = logging.getLogger(\"red.mod\")\n\n\nclass Events(MixinMeta):\n \"\"\"\n This is a mixin for the core mod cog\n Has a bunch of things split off to here.\n \"\"\"\n\n async def check_duplicates(self, message):\n guild = message.guild\n author = message.author\n\n guild_cache = self.cache.get(guild.id, None)\n if guild_cache is None:\n repeats = await self.config.guild(guild).delete_repeats()\n if repeats == -1:\n return False\n guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))\n\n if not message.content:\n return False\n\n guild_cache[author].append(message.content)\n msgs = guild_cache[author]\n if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:\n try:\n await message.delete()\n return True\n except discord.HTTPException:\n pass\n return False\n\n async def check_mention_spam(self, message):\n guild, author = message.guild, message.author\n mention_spam = await self.config.guild(guild).mention_spam.all()\n\n if mention_spam[\"strict\"]: # if strict is enabled\n mentions = message.raw_mentions\n else: # if not enabled\n mentions = set(message.mentions)\n\n if mention_spam[\"ban\"]:\n if len(mentions) >= mention_spam[\"ban\"]:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to ban a member ({member}) for mention spam in server {guild}.\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"ban\",\n author,\n guild.me,\n _(\"Mention spam (Autoban)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"kick\"]:\n if len(mentions) >= mention_spam[\"kick\"]:\n try:\n await guild.kick(author, reason=_(\"Mention Spam (Autokick)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to kick a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"kick\",\n author,\n guild.me,\n _(\"Mention spam (Autokick)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"warn\"]:\n if len(mentions) >= mention_spam[\"warn\"]:\n try:\n await author.send(_(\"Please do not mass mention people!\"))\n except (discord.HTTPException, discord.Forbidden):\n try:\n await message.channel.send(\n _(\"{member}, Please do not mass mention people!\").format(\n member=author.mention\n )\n )\n except (discord.HTTPException, discord.Forbidden):\n log.warning(\n \"Failed to warn a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n return False\n\n await modlog.create_case(\n self.bot,\n guild,\n 
message.created_at,\n \"warning\",\n author,\n guild.me,\n _(\"Mention spam (Autowarn)\"),\n until=None,\n channel=None,\n )\n return True\n return False\n\n @commands.Cog.listener()\n async def on_message(self, message):\n author = message.author\n if message.guild is None or self.bot.user == author:\n return\n\n if await self.bot.cog_disabled_in_guild(self, message.guild):\n return\n\n valid_user = isinstance(author, discord.Member) and not author.bot\n if not valid_user:\n return\n\n # Bots and mods or superior are ignored from the filter\n mod_or_superior = await is_mod_or_superior(self.bot, obj=author)\n if mod_or_superior:\n return\n # As are anyone configured to be\n if await self.bot.is_automod_immune(message):\n return\n\n await i18n.set_contextual_locales_from_guild(self.bot, message.guild)\n\n deleted = await self.check_duplicates(message)\n if not deleted:\n await self.check_mention_spam(message)\n\n @commands.Cog.listener()\n async def on_user_update(self, before: discord.User, after: discord.User):\n if before.name != after.name:\n track_all_names = await self.config.track_all_names()\n if not track_all_names:\n return\n async with self.config.user(before).past_names() as name_list:\n while None in name_list: # clean out null entries from a bug\n name_list.remove(None)\n if before.name in name_list:\n # Ensure order is maintained without duplicates occurring\n name_list.remove(before.name)\n name_list.append(before.name)\n while len(name_list) > 20:\n name_list.pop(0)\n\n @commands.Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member):\n if before.nick != after.nick and before.nick is not None:\n guild = after.guild\n if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):\n return\n track_all_names = await self.config.track_all_names()\n track_nicknames = await self.config.guild(guild).track_nicknames()\n if (not track_all_names) or (not track_nicknames):\n return\n async with self.config.member(before).past_nicks() as nick_list:\n while None in nick_list: # clean out null entries from a bug\n nick_list.remove(None)\n if before.nick in nick_list:\n nick_list.remove(before.nick)\n nick_list.append(before.nick)\n while len(nick_list) > 20:\n nick_list.pop(0)\n", "path": "redbot/cogs/mod/events.py"}], "after_files": [{"content": "import logging\nfrom datetime import timezone\nfrom collections import defaultdict, deque\n\nimport discord\nfrom redbot.core import i18n, modlog, commands\nfrom redbot.core.utils.mod import is_mod_or_superior\nfrom .abc import MixinMeta\n\n_ = i18n.Translator(\"Mod\", __file__)\nlog = logging.getLogger(\"red.mod\")\n\n\nclass Events(MixinMeta):\n \"\"\"\n This is a mixin for the core mod cog\n Has a bunch of things split off to here.\n \"\"\"\n\n async def check_duplicates(self, message):\n guild = message.guild\n author = message.author\n\n guild_cache = self.cache.get(guild.id, None)\n if guild_cache is None:\n repeats = await self.config.guild(guild).delete_repeats()\n if repeats == -1:\n return False\n guild_cache = self.cache[guild.id] = defaultdict(lambda: deque(maxlen=repeats))\n\n if not message.content:\n return False\n\n guild_cache[author].append(message.content)\n msgs = guild_cache[author]\n if len(msgs) == msgs.maxlen and len(set(msgs)) == 1:\n try:\n await message.delete()\n return True\n except discord.HTTPException:\n pass\n return False\n\n async def check_mention_spam(self, message):\n guild, author = message.guild, message.author\n mention_spam = await 
self.config.guild(guild).mention_spam.all()\n\n if mention_spam[\"strict\"]: # if strict is enabled\n mentions = len(message.raw_mentions) + len(message.raw_role_mentions)\n else: # if not enabled\n mentions = len(set(message.mentions)) + len(set(message.role_mentions))\n\n if mention_spam[\"ban\"]:\n if mentions >= mention_spam[\"ban\"]:\n try:\n await guild.ban(author, reason=_(\"Mention spam (Autoban)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to ban a member ({member}) for mention spam in server {guild}.\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"ban\",\n author,\n guild.me,\n _(\"Mention spam (Autoban)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"kick\"]:\n if mentions >= mention_spam[\"kick\"]:\n try:\n await guild.kick(author, reason=_(\"Mention Spam (Autokick)\"))\n except discord.HTTPException:\n log.warning(\n \"Failed to kick a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n else:\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"kick\",\n author,\n guild.me,\n _(\"Mention spam (Autokick)\"),\n until=None,\n channel=None,\n )\n return True\n\n if mention_spam[\"warn\"]:\n if mentions >= mention_spam[\"warn\"]:\n try:\n await author.send(_(\"Please do not mass mention people!\"))\n except (discord.HTTPException, discord.Forbidden):\n try:\n await message.channel.send(\n _(\"{member}, Please do not mass mention people!\").format(\n member=author.mention\n )\n )\n except (discord.HTTPException, discord.Forbidden):\n log.warning(\n \"Failed to warn a member ({member}) for mention spam in server {guild}\".format(\n member=author.id, guild=guild.id\n )\n )\n return False\n\n await modlog.create_case(\n self.bot,\n guild,\n message.created_at,\n \"warning\",\n author,\n guild.me,\n _(\"Mention spam (Autowarn)\"),\n until=None,\n channel=None,\n )\n return True\n return False\n\n @commands.Cog.listener()\n async def on_message(self, message):\n author = message.author\n if message.guild is None or self.bot.user == author:\n return\n\n if await self.bot.cog_disabled_in_guild(self, message.guild):\n return\n\n valid_user = isinstance(author, discord.Member) and not author.bot\n if not valid_user:\n return\n\n # Bots and mods or superior are ignored from the filter\n mod_or_superior = await is_mod_or_superior(self.bot, obj=author)\n if mod_or_superior:\n return\n # As are anyone configured to be\n if await self.bot.is_automod_immune(message):\n return\n\n await i18n.set_contextual_locales_from_guild(self.bot, message.guild)\n\n deleted = await self.check_duplicates(message)\n if not deleted:\n await self.check_mention_spam(message)\n\n @commands.Cog.listener()\n async def on_user_update(self, before: discord.User, after: discord.User):\n if before.name != after.name:\n track_all_names = await self.config.track_all_names()\n if not track_all_names:\n return\n async with self.config.user(before).past_names() as name_list:\n while None in name_list: # clean out null entries from a bug\n name_list.remove(None)\n if before.name in name_list:\n # Ensure order is maintained without duplicates occurring\n name_list.remove(before.name)\n name_list.append(before.name)\n while len(name_list) > 20:\n name_list.pop(0)\n\n @commands.Cog.listener()\n async def on_member_update(self, before: discord.Member, after: discord.Member):\n if before.nick != after.nick and before.nick is not 
None:\n guild = after.guild\n if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):\n return\n track_all_names = await self.config.track_all_names()\n track_nicknames = await self.config.guild(guild).track_nicknames()\n if (not track_all_names) or (not track_nicknames):\n return\n async with self.config.member(before).past_nicks() as nick_list:\n while None in nick_list: # clean out null entries from a bug\n nick_list.remove(None)\n if before.nick in nick_list:\n nick_list.remove(before.nick)\n nick_list.append(before.nick)\n while len(nick_list) > 20:\n nick_list.pop(0)\n", "path": "redbot/cogs/mod/events.py"}]} | 2,423 | 358 |
gh_patches_debug_3330 | rasdani/github-patches | git_diff | LibraryOfCongress__concordia-240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable Adobe DTM for Analytics (closes #160)
This embeds the code but Adobe’s instructions violate web
performance guidelines and we should review this carefully
to see how much Adobe is affecting site performance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `concordia/context_processors.py`
Content:
```
1 from django.conf import settings
2
3
4 def system_configuration(request):
5 """
6 Expose some system configuration to the default template context
7 """
8
9 return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
10
11
12 def site_navigation(request):
13 data = {"VIEW_NAME": request.resolver_match.view_name}
14
15 data["VIEW_NAME_FOR_CSS"] = data["VIEW_NAME"].replace(":", "--")
16
17 path_components = request.path.strip("/").split("/")
18 for i, component in enumerate(path_components, start=1):
19 data["PATH_LEVEL_%d" % i] = component
20
21 return data
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/concordia/context_processors.py b/concordia/context_processors.py
--- a/concordia/context_processors.py
+++ b/concordia/context_processors.py
@@ -6,7 +6,10 @@
Expose some system configuration to the default template context
"""
- return {"SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None)}
+ return {
+ "SENTRY_PUBLIC_DSN": getattr(settings, "SENTRY_PUBLIC_DSN", None),
+ "CONCORDIA_ENVIRONMENT": settings.CONCORDIA_ENVIRONMENT,
+ }
def site_navigation(request):
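
`CONCORDIA_ENVIRONMENT` is presumably read by a base template to gate the Adobe DTM embed; a hypothetical quick check of the new context key (function and key names come from the patch) might look like:

```python
# Assumes CONCORDIA_ENVIRONMENT is defined in Django settings, as the patched code requires
from django.test import RequestFactory
from concordia.context_processors import system_configuration

context = system_configuration(RequestFactory().get("/"))
assert "CONCORDIA_ENVIRONMENT" in context
```
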
| {"golden_diff": "diff --git a/concordia/context_processors.py b/concordia/context_processors.py\n--- a/concordia/context_processors.py\n+++ b/concordia/context_processors.py\n@@ -6,7 +6,10 @@\n Expose some system configuration to the default template context\n \"\"\"\n \n- return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n+ return {\n+ \"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None),\n+ \"CONCORDIA_ENVIRONMENT\": settings.CONCORDIA_ENVIRONMENT,\n+ }\n \n \n def site_navigation(request):\n", "issue": "Enable Adobe DTM for Analytics (closes #160)\nThis embeds the code but Adobe\u2019s instructions violate web\r\nperformance guidelines and we should review this carefully\r\nto see how much Adobe is affecting site performance.\n", "before_files": [{"content": "from django.conf import settings\n\n\ndef system_configuration(request):\n \"\"\"\n Expose some system configuration to the default template context\n \"\"\"\n\n return {\"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None)}\n\n\ndef site_navigation(request):\n data = {\"VIEW_NAME\": request.resolver_match.view_name}\n\n data[\"VIEW_NAME_FOR_CSS\"] = data[\"VIEW_NAME\"].replace(\":\", \"--\")\n\n path_components = request.path.strip(\"/\").split(\"/\")\n for i, component in enumerate(path_components, start=1):\n data[\"PATH_LEVEL_%d\" % i] = component\n\n return data\n", "path": "concordia/context_processors.py"}], "after_files": [{"content": "from django.conf import settings\n\n\ndef system_configuration(request):\n \"\"\"\n Expose some system configuration to the default template context\n \"\"\"\n\n return {\n \"SENTRY_PUBLIC_DSN\": getattr(settings, \"SENTRY_PUBLIC_DSN\", None),\n \"CONCORDIA_ENVIRONMENT\": settings.CONCORDIA_ENVIRONMENT,\n }\n\n\ndef site_navigation(request):\n data = {\"VIEW_NAME\": request.resolver_match.view_name}\n\n data[\"VIEW_NAME_FOR_CSS\"] = data[\"VIEW_NAME\"].replace(\":\", \"--\")\n\n path_components = request.path.strip(\"/\").split(\"/\")\n for i, component in enumerate(path_components, start=1):\n data[\"PATH_LEVEL_%d\" % i] = component\n\n return data\n", "path": "concordia/context_processors.py"}]} | 477 | 134 |
gh_patches_debug_26332 | rasdani/github-patches | git_diff | jupyter__docker-stacks-388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider not writing in user home
All the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation: it is not easy to set up a hub such that useful folders like `.ssh` or files like `.bash_history` persist across container restarts.\r\n\r\nI might be missing underlying assumptions and use cases, but I suggest assuming that home itself will be mounted, and instead modifying the global configuration options.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy-notebook/mplimporthook.py`
Content:
```
1 """Startup script for IPython kernel.
2
3 Installs an import hook to configure the matplotlib backend on the fly.
4
5 Originally from @minrk at
6 https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
7 Repurposed for docker-stacks to address repeat bugs like
8 https://github.com/jupyter/docker-stacks/issues/235.
9 """
10 import sys
11 from IPython import get_ipython
12
13 class MatplotlibFinder(object):
14 """Import hook that notices when matplotlib.pyplot or pylab is imported
15 and tries to configure the matplotlib backend appropriately for the
16 environment.
17 """
18 _called = False
19
20 def find_module(self, fullname, path=None):
21 if self._called:
22 # already handled
23 return
24
25 if fullname not in ('pylab', 'matplotlib.pyplot'):
26 # not matplotlib
27 return
28
29 # don't call me again
30 self._called = True
31
32 try:
33 # remove myself from the import hooks
34 sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
35 except ValueError:
36 pass
37
38 ip = get_ipython()
39 if ip is None:
40 # not in an interactive environment
41 return
42
43 if ip.pylab_gui_select:
44 # backend already selected
45 return
46
47 if hasattr(ip, 'kernel'):
48 # default to inline in kernel environments
49 ip.enable_matplotlib('inline')
50 else:
51 print('enabling matplotlib')
52 ip.enable_matplotlib()
53
54 # install the finder immediately
55 sys.meta_path.insert(0, MatplotlibFinder())
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py
deleted file mode 100644
--- a/scipy-notebook/mplimporthook.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Startup script for IPython kernel.
-
-Installs an import hook to configure the matplotlib backend on the fly.
-
-Originally from @minrk at
-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
-Repurposed for docker-stacks to address repeat bugs like
-https://github.com/jupyter/docker-stacks/issues/235.
-"""
-import sys
-from IPython import get_ipython
-
-class MatplotlibFinder(object):
- """Import hook that notices when matplotlib.pyplot or pylab is imported
- and tries to configure the matplotlib backend appropriately for the
- environment.
- """
- _called = False
-
- def find_module(self, fullname, path=None):
- if self._called:
- # already handled
- return
-
- if fullname not in ('pylab', 'matplotlib.pyplot'):
- # not matplotlib
- return
-
- # don't call me again
- self._called = True
-
- try:
- # remove myself from the import hooks
- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
- except ValueError:
- pass
-
- ip = get_ipython()
- if ip is None:
- # not in an interactive environment
- return
-
- if ip.pylab_gui_select:
- # backend already selected
- return
-
- if hasattr(ip, 'kernel'):
- # default to inline in kernel environments
- ip.enable_matplotlib('inline')
- else:
- print('enabling matplotlib')
- ip.enable_matplotlib()
-
-# install the finder immediately
-sys.meta_path.insert(0, MatplotlibFinder())
\ No newline at end of file
| {"golden_diff": "diff --git a/scipy-notebook/mplimporthook.py b/scipy-notebook/mplimporthook.py\ndeleted file mode 100644\n--- a/scipy-notebook/mplimporthook.py\n+++ /dev/null\n@@ -1,55 +0,0 @@\n-\"\"\"Startup script for IPython kernel.\n-\n-Installs an import hook to configure the matplotlib backend on the fly.\n-\n-Originally from @minrk at \n-https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\n-Repurposed for docker-stacks to address repeat bugs like\n-https://github.com/jupyter/docker-stacks/issues/235.\n-\"\"\"\n-import sys\n-from IPython import get_ipython\n-\n-class MatplotlibFinder(object):\n- \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n- and tries to configure the matplotlib backend appropriately for the\n- environment.\n- \"\"\"\n- _called = False\n- \n- def find_module(self, fullname, path=None):\n- if self._called:\n- # already handled\n- return\n- \n- if fullname not in ('pylab', 'matplotlib.pyplot'):\n- # not matplotlib\n- return\n- \n- # don't call me again\n- self._called = True\n- \n- try:\n- # remove myself from the import hooks\n- sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n- except ValueError:\n- pass\n- \n- ip = get_ipython()\n- if ip is None:\n- # not in an interactive environment\n- return\n- \n- if ip.pylab_gui_select:\n- # backend already selected\n- return\n- \n- if hasattr(ip, 'kernel'):\n- # default to inline in kernel environments\n- ip.enable_matplotlib('inline')\n- else:\n- print('enabling matplotlib')\n- ip.enable_matplotlib()\n-\n-# install the finder immediately\n-sys.meta_path.insert(0, MatplotlibFinder())\n\\ No newline at end of file\n", "issue": "Consider not writing in user home\nAll the images (starting from base notebook) write part of the configuration inside the user home folder, and assume that `/home/$NB_USER/work` will be mounted. This has a practical limitation that it is not easy to setup a hub such that useful folders like `.ssh`, or e.g. 
`.bash_history` persist across container restarts.\r\n\r\nI might be missing underlying assumptions and use cases, but I suggest to assume that home itself would be mounted, and instead modify the global configuration options.\n", "before_files": [{"content": "\"\"\"Startup script for IPython kernel.\n\nInstalls an import hook to configure the matplotlib backend on the fly.\n\nOriginally from @minrk at \nhttps://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py\nRepurposed for docker-stacks to address repeat bugs like\nhttps://github.com/jupyter/docker-stacks/issues/235.\n\"\"\"\nimport sys\nfrom IPython import get_ipython\n\nclass MatplotlibFinder(object):\n \"\"\"Import hook that notices when matplotlib.pyplot or pylab is imported\n and tries to configure the matplotlib backend appropriately for the\n environment.\n \"\"\"\n _called = False\n \n def find_module(self, fullname, path=None):\n if self._called:\n # already handled\n return\n \n if fullname not in ('pylab', 'matplotlib.pyplot'):\n # not matplotlib\n return\n \n # don't call me again\n self._called = True\n \n try:\n # remove myself from the import hooks\n sys.meta_path = [loader for loader in sys.meta_path if loader is not self]\n except ValueError:\n pass\n \n ip = get_ipython()\n if ip is None:\n # not in an interactive environment\n return\n \n if ip.pylab_gui_select:\n # backend already selected\n return\n \n if hasattr(ip, 'kernel'):\n # default to inline in kernel environments\n ip.enable_matplotlib('inline')\n else:\n print('enabling matplotlib')\n ip.enable_matplotlib()\n\n# install the finder immediately\nsys.meta_path.insert(0, MatplotlibFinder())", "path": "scipy-notebook/mplimporthook.py"}], "after_files": [{"content": null, "path": "scipy-notebook/mplimporthook.py"}]} | 812 | 463 |
gh_patches_debug_14024 | rasdani/github-patches | git_diff | ivy-llc__ivy-16042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cosine_similarity
#15051
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/nn/functional/common.py`
Content:
```
1 # local
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -1 +1,25 @@
# local
+import ivy
+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+from ivy.func_wrapper import with_unsupported_dtypes
+
+
+@with_unsupported_dtypes({"2.0.1 and below": ("float16", "bfloat16")}, "torch")
+@to_ivy_arrays_and_back
+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
+ numerator = ivy.sum(x1 * x2, axis=axis)
+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
+ else:
+ numerator = ivy.sum(x1 * x2)
+ x1_squared_norm = ivy.sum(ivy.square(x1))
+ x2_squared_norm = ivy.sum(ivy.square(x2))
+
+ x1_norm = ivy.sqrt(x1_squared_norm)
+ x2_norm = ivy.sqrt(x2_squared_norm)
+ norm_mm = x1_norm * x2_norm
+ denominator = ivy.maximum(norm_mm, eps)
+
+ cosine = numerator / denominator
+ return cosine
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -1 +1,25 @@\n # local\n+import ivy\n+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n+from ivy.func_wrapper import with_unsupported_dtypes\n+\n+\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n+@to_ivy_arrays_and_back\n+def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n+ if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n+ numerator = ivy.sum(x1 * x2, axis=axis)\n+ x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n+ x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n+ else:\n+ numerator = ivy.sum(x1 * x2)\n+ x1_squared_norm = ivy.sum(ivy.square(x1))\n+ x2_squared_norm = ivy.sum(ivy.square(x2))\n+\n+ x1_norm = ivy.sqrt(x1_squared_norm)\n+ x2_norm = ivy.sqrt(x2_squared_norm)\n+ norm_mm = x1_norm * x2_norm\n+ denominator = ivy.maximum(norm_mm, eps)\n+\n+ cosine = numerator / denominator\n+ return cosine\n", "issue": "cosine_similarity\n#15051 \n", "before_files": [{"content": "# local\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\", \"bfloat16\")}, \"torch\")\n@to_ivy_arrays_and_back\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 283 | 380 |
gh_patches_debug_31672 | rasdani/github-patches | git_diff | python-discord__bot-1088 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catch error when source command cannot retrieve lines or file
Sentry Issue: [BOT-7K](https://sentry.io/organizations/python-discord/issues/1816540252/?referrer=github_integration)
This happens if the target command is one which has been added via internal eval or some other dynamic method, so it's _really_ low priority.
I think it'd be most convenient to let the error propagate a bit and handle it here https://github.com/python-discord/bot/blob/3649c2a03dc158a25f2c3be98db8691f903a1953/bot/cogs/source.py#L59
```
OSError: could not get source code
(1 additional frame(s) were not displayed)
...
File "bot/cogs/source.py", line 59, in source_command
embed = await self.build_embed(source_item)
File "bot/cogs/source.py", line 102, in build_embed
url, location, first_line = self.get_source_link(source_object)
File "bot/cogs/source.py", line 84, in get_source_link
lines, first_line_no = inspect.getsourcelines(src)
File "inspect.py", line 967, in getsourcelines
lines, lnum = findsource(object)
File "inspect.py", line 798, in findsource
raise OSError('could not get source code')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/source.py`
Content:
```
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 raise commands.BadArgument(
39 f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
40 )
41
42
43 class BotSource(commands.Cog):
44 """Displays information about the bot's source code."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 @commands.command(name="source", aliases=("src",))
50 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
51 """Display information and a GitHub link to the source code of a command, tag, or cog."""
52 if not source_item:
53 embed = Embed(title="Bot's GitHub Repository")
54 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
55 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
56 await ctx.send(embed=embed)
57 return
58
59 embed = await self.build_embed(source_item)
60 await ctx.send(embed=embed)
61
62 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
63 """Build GitHub link of source item, return this link, file location and first line number."""
64 if isinstance(source_item, commands.HelpCommand):
65 src = type(source_item)
66 filename = inspect.getsourcefile(src)
67 elif isinstance(source_item, commands.Command):
68 if source_item.cog_name == "Alias":
69 cmd_name = source_item.callback.__name__.replace("_alias", "")
70 cmd = self.bot.get_command(cmd_name.replace("_", " "))
71 src = cmd.callback.__code__
72 filename = src.co_filename
73 else:
74 src = source_item.callback.__code__
75 filename = src.co_filename
76 elif isinstance(source_item, str):
77 tags_cog = self.bot.get_cog("Tags")
78 filename = tags_cog._cache[source_item]["location"]
79 else:
80 src = type(source_item)
81 filename = inspect.getsourcefile(src)
82
83 if not isinstance(source_item, str):
84 lines, first_line_no = inspect.getsourcelines(src)
85 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
86 else:
87 first_line_no = None
88 lines_extension = ""
89
90 # Handle tag file location differently than others to avoid errors in some cases
91 if not first_line_no:
92 file_location = Path(filename).relative_to("/bot/")
93 else:
94 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
95
96 url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
97
98 return url, file_location, first_line_no or None
99
100 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
101 """Build embed based on source object."""
102 url, location, first_line = self.get_source_link(source_object)
103
104 if isinstance(source_object, commands.HelpCommand):
105 title = "Help Command"
106 description = source_object.__doc__.splitlines()[1]
107 elif isinstance(source_object, commands.Command):
108 if source_object.cog_name == "Alias":
109 cmd_name = source_object.callback.__name__.replace("_alias", "")
110 cmd = self.bot.get_command(cmd_name.replace("_", " "))
111 description = cmd.short_doc
112 else:
113 description = source_object.short_doc
114
115 title = f"Command: {source_object.qualified_name}"
116 elif isinstance(source_object, str):
117 title = f"Tag: {source_object}"
118 description = ""
119 else:
120 title = f"Cog: {source_object.qualified_name}"
121 description = source_object.description.splitlines()[0]
122
123 embed = Embed(title=title, description=description)
124 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
125 line_text = f":{first_line}" if first_line else ""
126 embed.set_footer(text=f"{location}{line_text}")
127
128 return embed
129
130
131 def setup(bot: Bot) -> None:
132 """Load the BotSource cog."""
133 bot.add_cog(BotSource(bot))
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/source.py b/bot/cogs/source.py
--- a/bot/cogs/source.py
+++ b/bot/cogs/source.py
@@ -60,11 +60,12 @@
await ctx.send(embed=embed)
def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
- """Build GitHub link of source item, return this link, file location and first line number."""
- if isinstance(source_item, commands.HelpCommand):
- src = type(source_item)
- filename = inspect.getsourcefile(src)
- elif isinstance(source_item, commands.Command):
+ """
+ Build GitHub link of source item, return this link, file location and first line number.
+
+ Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
+ """
+ if isinstance(source_item, commands.Command):
if source_item.cog_name == "Alias":
cmd_name = source_item.callback.__name__.replace("_alias", "")
cmd = self.bot.get_command(cmd_name.replace("_", " "))
@@ -78,10 +79,17 @@
filename = tags_cog._cache[source_item]["location"]
else:
src = type(source_item)
- filename = inspect.getsourcefile(src)
+ try:
+ filename = inspect.getsourcefile(src)
+ except TypeError:
+ raise commands.BadArgument("Cannot get source for a dynamically-created object.")
if not isinstance(source_item, str):
- lines, first_line_no = inspect.getsourcelines(src)
+ try:
+ lines, first_line_no = inspect.getsourcelines(src)
+ except OSError:
+ raise commands.BadArgument("Cannot get source for a dynamically-created object.")
+
lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
else:
first_line_no = None
| {"golden_diff": "diff --git a/bot/cogs/source.py b/bot/cogs/source.py\n--- a/bot/cogs/source.py\n+++ b/bot/cogs/source.py\n@@ -60,11 +60,12 @@\n await ctx.send(embed=embed)\n \n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n- \"\"\"Build GitHub link of source item, return this link, file location and first line number.\"\"\"\n- if isinstance(source_item, commands.HelpCommand):\n- src = type(source_item)\n- filename = inspect.getsourcefile(src)\n- elif isinstance(source_item, commands.Command):\n+ \"\"\"\n+ Build GitHub link of source item, return this link, file location and first line number.\n+\n+ Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n+ \"\"\"\n+ if isinstance(source_item, commands.Command):\n if source_item.cog_name == \"Alias\":\n cmd_name = source_item.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n@@ -78,10 +79,17 @@\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n- filename = inspect.getsourcefile(src)\n+ try:\n+ filename = inspect.getsourcefile(src)\n+ except TypeError:\n+ raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n \n if not isinstance(source_item, str):\n- lines, first_line_no = inspect.getsourcelines(src)\n+ try:\n+ lines, first_line_no = inspect.getsourcelines(src)\n+ except OSError:\n+ raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n+\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n", "issue": "Catch error when source command cannot retrieve lines or file\nSentry Issue: [BOT-7K](https://sentry.io/organizations/python-discord/issues/1816540252/?referrer=github_integration)\r\n\r\nThis happens if the target command is one which has been added via internal eval or some other dynamic method, so it's _really_ low priority.\r\n\r\nI think it'd be most convenient to let the error propagate a bit and handle it here https://github.com/python-discord/bot/blob/3649c2a03dc158a25f2c3be98db8691f903a1953/bot/cogs/source.py#L59\r\n\r\n```\r\nOSError: could not get source code\r\n(1 additional frame(s) were not displayed)\r\n...\r\n File \"bot/cogs/source.py\", line 59, in source_command\r\n embed = await self.build_embed(source_item)\r\n File \"bot/cogs/source.py\", line 102, in build_embed\r\n url, location, first_line = self.get_source_link(source_object)\r\n File \"bot/cogs/source.py\", line 84, in get_source_link\r\n lines, first_line_no = inspect.getsourcelines(src)\r\n File \"inspect.py\", line 967, in getsourcelines\r\n lines, lnum = findsource(object)\r\n File \"inspect.py\", line 798, in findsource\r\n raise OSError('could not get source code')\r\n```\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = 
ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"Build GitHub link of source item, return this link, file location and first line number.\"\"\"\n if isinstance(source_item, commands.HelpCommand):\n src = type(source_item)\n filename = inspect.getsourcefile(src)\n elif isinstance(source_item, commands.Command):\n if source_item.cog_name == \"Alias\":\n cmd_name = source_item.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n src = cmd.callback.__code__\n filename = src.co_filename\n else:\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n filename = inspect.getsourcefile(src)\n\n if not isinstance(source_item, str):\n lines, first_line_no = inspect.getsourcelines(src)\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n if source_object.cog_name == \"Alias\":\n cmd_name = source_object.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n description = cmd.short_doc\n else:\n description = source_object.short_doc\n\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = 
source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/cogs/source.py"}], "after_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. 
via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n if source_item.cog_name == \"Alias\":\n cmd_name = source_item.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n src = cmd.callback.__code__\n filename = src.co_filename\n else:\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n if source_object.cog_name == \"Alias\":\n cmd_name = source_object.callback.__name__.replace(\"_alias\", \"\")\n cmd = self.bot.get_command(cmd_name.replace(\"_\", \" \"))\n description = cmd.short_doc\n else:\n description = source_object.short_doc\n\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/cogs/source.py"}]} | 1,985 | 423 |
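The failure mode in this record is easy to reproduce standalone: `inspect` cannot recover source for objects created at runtime, so `getsourcefile` or `getsourcelines` raise `TypeError` or `OSError`. A minimal sketch of the guard the golden diff introduces follows; the function names here are illustrative and not taken from the bot's codebase:

```python
# Demonstrates the OSError from inspect on dynamically-created objects and the
# try/except guard used by the patch above.
import inspect

namespace = {}
exec("def dynamic_command():\n    return 42", namespace)  # no backing source file
dynamic_command = namespace["dynamic_command"]


def describe_source(obj):
    try:
        filename = inspect.getsourcefile(obj)
        lines, first_line = inspect.getsourcelines(obj)
    except (TypeError, OSError):
        return "Cannot get source for a dynamically-created object."
    return f"{filename}#L{first_line}-L{first_line + len(lines) - 1}"


print(describe_source(describe_source))   # real file location and line range
print(describe_source(dynamic_command))   # falls into the except branch
```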
gh_patches_debug_30203 | rasdani/github-patches | git_diff | Mailu__Mailu-2709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PROXY_AUTH_WHITELIST is validating address in REAL_IP_HEADER instead of proxy IP address
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoi…" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
When using a proxy for authentication, we have the option to set proxy IPs/CIDRs to trust for authentication with `PROXY_AUTH_WHITELIST` setting. Whenever we are using a proxy for authentication we are also supposed to set `REAL_IP_HEADER` and `REAL_IP_FROM` so mailu can determine where the request came from when the proxy intercepted it. However, when `REAL_IP_HEADER` and `REAL_IP_FROM` are set, the mailu frontend is validating the IP address in the header from the proxy, rather than the proxy IP itself, for `PROXY_AUTH_WHITELIST`.
I would guess that this has something to do with how nginx is configured for the frontend, as this only happens when BOTH `REAL_IP_FROM` and `PROXY_AUTH_WHITELIST` are configured. If only `PROXY_AUTH_WHITELIST` is set, then the user is permitted to login and access webmail (at increased security risk of IP spoofing).
## Replication Steps
<!--
Steps for replicating your issue
-->
1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.
2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.
3. Set mailu configuration `PROXY_AUTH_WHITELIST` to use the proxy's IP address. Leave `REAL_IP_HEADER` and `REAL_IP_FROM` unset. Restart/redeploy mailu to pick up the new configuration.
4. Confirm authentication through proxy works by navigating to mailu through the proxy, using a different host / source IP address than the one used by the proxy. A successful login will load Webmail.
5. Set mailu configuration `REAL_IP_FROM` to use the proxy's IP address. Set `REAL_IP_HEADER` to a header that the proxy will set, such as `X-Real-IP`. Restart/redeploy mailu to pick up the new configuration.
6. Navigate to mailu through the proxy, using a different host / source IP address than the one used by the proxy. Complete the login if prompted.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Mailu returns with HTTP 500 error, where `X.X.X.X` is the IP address of the host running the browser, found from the header:
```
Internal Server Error
X.X.X.X is not on PROXY_AUTH_WHITELIST
```
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesn’t work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
User should be permitted to login without encountering this error.
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
Logs from the frontend:
```
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail HTTP/1.1" 301 162 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail/ HTTP/1.1" 302 138 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /sso/login?url=/webmail/ HTTP/1.1" 500 155 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/sso/views/base.py`
Content:
```
1 from werkzeug.utils import redirect
2 from mailu import models, utils
3 from mailu.sso import sso, forms
4 from mailu.ui import access
5
6 from flask import current_app as app
7 import flask
8 import flask_login
9 import secrets
10 import ipaddress
11 from urllib.parse import urlparse, urljoin
12 from werkzeug.urls import url_unquote
13
14 @sso.route('/login', methods=['GET', 'POST'])
15 def login():
16 if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:
17 return _proxy()
18
19 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
20 form = forms.LoginForm()
21
22 fields = []
23
24 if 'url' in flask.request.args and not 'homepage' in flask.request.url:
25 fields.append(form.submitAdmin)
26 else:
27 form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'
28 form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'
29 if str(app.config["WEBMAIL"]).upper() != "NONE":
30 fields.append(form.submitWebmail)
31 if str(app.config["ADMIN"]).upper() != "FALSE":
32 fields.append(form.submitAdmin)
33 fields = [fields]
34
35 if form.validate_on_submit():
36 if destination := _has_usable_redirect():
37 pass
38 else:
39 if form.submitAdmin.data:
40 destination = app.config['WEB_ADMIN']
41 elif form.submitWebmail.data:
42 destination = app.config['WEB_WEBMAIL']
43 device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))
44 username = form.email.data
45 if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):
46 flask.flash('Too many attempts from your IP (rate-limit)', 'error')
47 return flask.render_template('login.html', form=form, fields=fields)
48 if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):
49 flask.flash('Too many attempts for this user (rate-limit)', 'error')
50 return flask.render_template('login.html', form=form, fields=fields)
51 user = models.User.login(username, form.pw.data)
52 if user:
53 flask.session.regenerate()
54 flask_login.login_user(user)
55 response = flask.redirect(destination)
56 response.set_cookie('rate_limit', utils.limiter.device_cookie(username), max_age=31536000, path=flask.url_for('sso.login'), secure=app.config['SESSION_COOKIE_SECURE'], httponly=True)
57 flask.current_app.logger.info(f'Login succeeded for {username} from {client_ip} pwned={form.pwned.data}.')
58 if msg := utils.isBadOrPwned(form):
59 flask.flash(msg, "error")
60 return response
61 else:
62 utils.limiter.rate_limit_user(username, client_ip, device_cookie, device_cookie_username) if models.User.get(username) else utils.limiter.rate_limit_ip(client_ip, username)
63 flask.current_app.logger.warn(f'Login failed for {username} from {client_ip}.')
64 flask.flash('Wrong e-mail or password', 'error')
65 return flask.render_template('login.html', form=form, fields=fields)
66
67 @sso.route('/logout', methods=['GET'])
68 @access.authenticated
69 def logout():
70 flask_login.logout_user()
71 flask.session.destroy()
72 response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))
73 for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:
74 response.set_cookie(cookie, 'empty', expires=0)
75 return response
76
77 """
78 Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
79 https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
80 """
81 def _has_usable_redirect():
82 if 'homepage' in flask.request.url:
83 return None
84 if url := flask.request.args.get('url'):
85 url = url_unquote(url)
86 target = urlparse(urljoin(flask.request.url, url))
87 if target.netloc == urlparse(flask.request.url).netloc:
88 return target.geturl()
89 return None
90
91 """
92 https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
93 """
94 def _proxy():
95 ip = ipaddress.ip_address(flask.request.remote_addr)
96 if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
97 return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
98
99 email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
100 if not email:
101 return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
102
103 url = _has_usable_redirect() or app.config['WEB_ADMIN']
104
105 user = models.User.get(email)
106 if user:
107 flask.session.regenerate()
108 flask_login.login_user(user)
109 return flask.redirect(url)
110
111 if not app.config['PROXY_AUTH_CREATE']:
112 return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
113
114 client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
115 try:
116 localpart, desireddomain = email.rsplit('@')
117 except Exception as e:
118 flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)
119 return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
120 domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\'t exist. Go away! (domain=%s)' % desireddomain)
121 if not domain.max_users == -1 and len(domain.users) >= domain.max_users:
122 flask.current_app.logger.warning('Too many users for domain %s' % domain)
123 return flask.abort(500, 'Too many users in (domain=%s)' % domain)
124 user = models.User(localpart=localpart, domain=domain)
125 user.set_password(secrets.token_urlsafe())
126 models.db.session.add(user)
127 models.db.session.commit()
128 flask.session.regenerate()
129 flask_login.login_user(user)
130 user.send_welcome()
131 flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')
132 return flask.redirect(url)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -78,8 +78,8 @@
Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
"""
-def _has_usable_redirect():
- if 'homepage' in flask.request.url:
+def _has_usable_redirect(is_proxied=False):
+ if 'homepage' in flask.request.url and not is_proxied:
return None
if url := flask.request.args.get('url'):
url = url_unquote(url)
@@ -92,15 +92,16 @@
https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
"""
def _proxy():
- ip = ipaddress.ip_address(flask.request.remote_addr)
+ proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)
+ ip = ipaddress.ip_address(proxy_ip)
if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
- return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)
email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
if not email:
return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
- url = _has_usable_redirect() or app.config['WEB_ADMIN']
+ url = _has_usable_redirect(True) or app.config['WEB_ADMIN']
user = models.User.get(email)
if user:
| {"golden_diff": "diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py\n--- a/core/admin/mailu/sso/views/base.py\n+++ b/core/admin/mailu/sso/views/base.py\n@@ -78,8 +78,8 @@\n Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...\n https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html\n \"\"\"\n-def _has_usable_redirect():\n- if 'homepage' in flask.request.url:\n+def _has_usable_redirect(is_proxied=False):\n+ if 'homepage' in flask.request.url and not is_proxied:\n return None\n if url := flask.request.args.get('url'):\n url = url_unquote(url)\n@@ -92,15 +92,16 @@\n https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy\n \"\"\"\n def _proxy():\n- ip = ipaddress.ip_address(flask.request.remote_addr)\n+ proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)\n+ ip = ipaddress.ip_address(proxy_ip)\n if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):\n- return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)\n+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)\n \n email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])\n if not email:\n return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])\n \n- url = _has_usable_redirect() or app.config['WEB_ADMIN']\n+ url = _has_usable_redirect(True) or app.config['WEB_ADMIN']\n \n user = models.User.get(email)\n if user:\n", "issue": "PROXY_AUTH_WHITELIST is validating address in REAL_IP_HEADER instead of proxy IP address\n<!--\r\n\r\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\nBefore you open your issue\r\n- Check if no issue or pull-request for this already exists.\r\n- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- You understand `Mailu` is made by volunteers in their **free time** \u2014 be concise, civil and accept that delays can occur.\r\n- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\nPlease put your text outside of the comment blocks to be visible. You can use the button \"Preview\" above to check.\r\n\r\n-->\r\n\r\n## Environment & Version\r\n\r\n### Environment\r\n\r\n- [x] docker compose\r\n- [ ] kubernetes\r\n- [ ] docker swarm\r\n\r\n### Version\r\n\r\n- Version: `master`\r\n\r\n<!--\r\nTo find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).\r\n\r\n$> docker ps -a | grep mailu\r\n140b09d4b09c mailu/roundcube:1.7 \"docker-php-entrypoi\u2026\" 2 weeks ago Up 2 days (healthy) 80/tcp\r\n$> grep MAILU_VERSION docker-compose.yml mailu.env\r\n-->\r\n\r\n## Description\r\n<!--\r\nFurther explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.\r\n-->\r\nWhen using a proxy for authentication, we have the option to set proxy IPs/CIDRs to trust for authentication with `PROXY_AUTH_WHITELIST` setting. 
Whenever we are using a proxy for authentication we are also supposed to set `REAL_IP_HEADER` and `REAL_IP_FROM` so mailu can determine where the request came from when the proxy intercepted it. However, when `REAL_IP_HEADER` and `REAL_IP_FROM` are set, the mailu frontend is validating the IP address in the header from the proxy, rather than the proxy IP itself, for `PROXY_AUTH_WHITELIST`.\r\n\r\nI would guess that this has something to do with how nginx is configured for the frontend, as this only happens when BOTH `REAL_IP_FROM` and `PROXY_AUTH_WHITELIST` are configured. If only `PROXY_AUTH_WHITELIST` is set, then the user is permitted to login and access webmail (at increased security risk of IP spoofing).\r\n\r\n## Replication Steps\r\n<!--\r\nSteps for replicating your issue\r\n-->\r\n1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.\r\n2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.\r\n3. Set mailu configuration `PROXY_AUTH_WHITELIST` to use the proxy's IP address. Leave `REAL_IP_HEADER` and `REAL_IP_FROM` unset. Restart/redeploy mailu to pick up the new configuration.\r\n4. Confirm authentication through proxy works by navigating to mailu through the proxy, using a different host / source IP address than the one used by the proxy. A successful login will load Webmail.\r\n5. Set mailu configuration `REAL_IP_FROM` to use the proxy's IP address. Set `REAL_IP_HEADER` to a header that the proxy will set, such as `X-Real-IP`. Restart/redeploy mailu to pick up the new configuration.\r\n6. Navigate to mailu through the proxy, using a different host / source IP address than the one used by the proxy. Complete the login if prompted.\r\n\r\n## Observed behaviour\r\n<!--\r\nExplain or paste the result you received.\r\n-->\r\nMailu returns with HTTP 500 error, where `X.X.X.X` is the IP address of the host running the browser, found from the header:\r\n```\r\nInternal Server Error\r\nX.X.X.X is not on PROXY_AUTH_WHITELIST\r\n```\r\n\r\n## Expected behaviour\r\n<!--\r\nExplain what results you expected - be as specific as possible.\r\nJust saying \"it doesn\u2019t work as expected\" is not useful. It's also helpful to describe what you actually experienced.\r\n-->\r\nUser should be permitted to login without encountering this error.\r\n\r\n## Logs\r\n<!--\r\nOften it is very useful to include log fragments of the involved component.\r\nYou can get the logs via `docker logs <container name> --tail 1000`.\r\nFor example for the admin container: `docker logs mailu_admin_1 --tail 1000`\r\nor using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`\r\n\r\nIf you can find the relevant section, please share only the parts that seem relevant. 
If you have any logs, please enclose them in code tags, like so:\r\n\r\n```\r\nYour logs here!\r\n```\r\n-->\r\nLogs from the frontend:\r\n```\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /webmail HTTP/1.1\" 301 162 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /webmail/ HTTP/1.1\" 302 138 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n<host IP> - - [17/Mar/2023:19:50:25 -0500] \"GET /sso/login?url=/webmail/ HTTP/1.1\" 500 155 \"-\" \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36\"\r\n```\n", "before_files": [{"content": "from werkzeug.utils import redirect\nfrom mailu import models, utils\nfrom mailu.sso import sso, forms\nfrom mailu.ui import access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\nimport secrets\nimport ipaddress\nfrom urllib.parse import urlparse, urljoin\nfrom werkzeug.urls import url_unquote\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:\n return _proxy()\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n form = forms.LoginForm()\n\n fields = []\n\n if 'url' in flask.request.args and not 'homepage' in flask.request.url:\n fields.append(form.submitAdmin)\n else:\n form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'\n form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'\n if str(app.config[\"WEBMAIL\"]).upper() != \"NONE\":\n fields.append(form.submitWebmail)\n if str(app.config[\"ADMIN\"]).upper() != \"FALSE\":\n fields.append(form.submitAdmin)\n fields = [fields]\n\n if form.validate_on_submit():\n if destination := _has_usable_redirect():\n pass\n else:\n if form.submitAdmin.data:\n destination = app.config['WEB_ADMIN']\n elif form.submitWebmail.data:\n destination = app.config['WEB_WEBMAIL']\n device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))\n username = form.email.data\n if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):\n flask.flash('Too many attempts from your IP (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):\n flask.flash('Too many attempts for this user (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n user = models.User.login(username, form.pw.data)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n response = flask.redirect(destination)\n response.set_cookie('rate_limit', utils.limiter.device_cookie(username), max_age=31536000, path=flask.url_for('sso.login'), secure=app.config['SESSION_COOKIE_SECURE'], httponly=True)\n flask.current_app.logger.info(f'Login succeeded for {username} from {client_ip} pwned={form.pwned.data}.')\n if msg := utils.isBadOrPwned(form):\n flask.flash(msg, \"error\")\n return response\n else:\n utils.limiter.rate_limit_user(username, client_ip, device_cookie, device_cookie_username) if models.User.get(username) else utils.limiter.rate_limit_ip(client_ip, username)\n flask.current_app.logger.warn(f'Login failed for 
{username} from {client_ip}.')\n flask.flash('Wrong e-mail or password', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n\[email protected]('/logout', methods=['GET'])\[email protected]\ndef logout():\n flask_login.logout_user()\n flask.session.destroy()\n response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))\n for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:\n response.set_cookie(cookie, 'empty', expires=0)\n return response\n\n\"\"\"\nRedirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...\nhttps://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html\n\"\"\"\ndef _has_usable_redirect():\n if 'homepage' in flask.request.url:\n return None\n if url := flask.request.args.get('url'):\n url = url_unquote(url)\n target = urlparse(urljoin(flask.request.url, url))\n if target.netloc == urlparse(flask.request.url).netloc:\n return target.geturl()\n return None\n\n\"\"\"\nhttps://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy\n\"\"\"\ndef _proxy():\n ip = ipaddress.ip_address(flask.request.remote_addr)\n if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):\n return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)\n\n email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])\n if not email:\n return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])\n\n url = _has_usable_redirect() or app.config['WEB_ADMIN']\n\n user = models.User.get(email)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n return flask.redirect(url)\n\n if not app.config['PROXY_AUTH_CREATE']:\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n try:\n localpart, desireddomain = email.rsplit('@')\n except Exception as e:\n flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\\'t exist. Go away! 
(domain=%s)' % desireddomain)\n if not domain.max_users == -1 and len(domain.users) >= domain.max_users:\n flask.current_app.logger.warning('Too many users for domain %s' % domain)\n return flask.abort(500, 'Too many users in (domain=%s)' % domain)\n user = models.User(localpart=localpart, domain=domain)\n user.set_password(secrets.token_urlsafe())\n models.db.session.add(user)\n models.db.session.commit()\n flask.session.regenerate()\n flask_login.login_user(user)\n user.send_welcome()\n flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')\n return flask.redirect(url)\n", "path": "core/admin/mailu/sso/views/base.py"}], "after_files": [{"content": "from werkzeug.utils import redirect\nfrom mailu import models, utils\nfrom mailu.sso import sso, forms\nfrom mailu.ui import access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\nimport secrets\nimport ipaddress\nfrom urllib.parse import urlparse, urljoin\nfrom werkzeug.urls import url_unquote\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:\n return _proxy()\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n form = forms.LoginForm()\n\n fields = []\n\n if 'url' in flask.request.args and not 'homepage' in flask.request.url:\n fields.append(form.submitAdmin)\n else:\n form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'\n form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'\n if str(app.config[\"WEBMAIL\"]).upper() != \"NONE\":\n fields.append(form.submitWebmail)\n if str(app.config[\"ADMIN\"]).upper() != \"FALSE\":\n fields.append(form.submitAdmin)\n fields = [fields]\n\n if form.validate_on_submit():\n if destination := _has_usable_redirect():\n pass\n else:\n if form.submitAdmin.data:\n destination = app.config['WEB_ADMIN']\n elif form.submitWebmail.data:\n destination = app.config['WEB_WEBMAIL']\n device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))\n username = form.email.data\n if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):\n flask.flash('Too many attempts from your IP (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):\n flask.flash('Too many attempts for this user (rate-limit)', 'error')\n return flask.render_template('login.html', form=form, fields=fields)\n user = models.User.login(username, form.pw.data)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n response = flask.redirect(destination)\n response.set_cookie('rate_limit', utils.limiter.device_cookie(username), max_age=31536000, path=flask.url_for('sso.login'), secure=app.config['SESSION_COOKIE_SECURE'], httponly=True)\n flask.current_app.logger.info(f'Login succeeded for {username} from {client_ip} pwned={form.pwned.data}.')\n if msg := utils.isBadOrPwned(form):\n flask.flash(msg, \"error\")\n return response\n else:\n utils.limiter.rate_limit_user(username, client_ip, device_cookie, device_cookie_username) if models.User.get(username) else utils.limiter.rate_limit_ip(client_ip, username)\n flask.current_app.logger.warn(f'Login failed for {username} from {client_ip}.')\n flask.flash('Wrong e-mail or password', 
'error')\n return flask.render_template('login.html', form=form, fields=fields)\n\[email protected]('/logout', methods=['GET'])\[email protected]\ndef logout():\n flask_login.logout_user()\n flask.session.destroy()\n response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))\n for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:\n response.set_cookie(cookie, 'empty', expires=0)\n return response\n\n\"\"\"\nRedirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...\nhttps://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html\n\"\"\"\ndef _has_usable_redirect(is_proxied=False):\n if 'homepage' in flask.request.url and not is_proxied:\n return None\n if url := flask.request.args.get('url'):\n url = url_unquote(url)\n target = urlparse(urljoin(flask.request.url, url))\n if target.netloc == urlparse(flask.request.url).netloc:\n return target.geturl()\n return None\n\n\"\"\"\nhttps://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy\n\"\"\"\ndef _proxy():\n proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)\n ip = ipaddress.ip_address(proxy_ip)\n if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):\n return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)\n\n email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])\n if not email:\n return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])\n\n url = _has_usable_redirect(True) or app.config['WEB_ADMIN']\n\n user = models.User.get(email)\n if user:\n flask.session.regenerate()\n flask_login.login_user(user)\n return flask.redirect(url)\n\n if not app.config['PROXY_AUTH_CREATE']:\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n\n client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)\n try:\n localpart, desireddomain = email.rsplit('@')\n except Exception as e:\n flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)\n return flask.abort(500, 'You don\\'t exist. Go away! (%s)' % email)\n domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\\'t exist. Go away! (domain=%s)' % desireddomain)\n if not domain.max_users == -1 and len(domain.users) >= domain.max_users:\n flask.current_app.logger.warning('Too many users for domain %s' % domain)\n return flask.abort(500, 'Too many users in (domain=%s)' % domain)\n user = models.User(localpart=localpart, domain=domain)\n user.set_password(secrets.token_urlsafe())\n models.db.session.add(user)\n models.db.session.commit()\n flask.session.regenerate()\n flask_login.login_user(user)\n user.send_welcome()\n flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')\n return flask.redirect(url)\n", "path": "core/admin/mailu/sso/views/base.py"}]} | 3,458 | 439 |
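The crux of this record is which address gets checked against `PROXY_AUTH_WHITELIST`: the forwarding proxy, not the client address carried in the real-IP header. A small standalone sketch of that corrected check using Python's `ipaddress` module is below; the header names and sample networks are assumptions for illustration, not Mailu configuration values:

```python
# Validate the forwarding proxy's address against a CIDR whitelist.
import ipaddress

PROXY_AUTH_WHITELIST = [ipaddress.ip_network(n) for n in ("10.0.0.0/8", "192.168.1.0/24")]


def proxy_is_trusted(headers, remote_addr):
    # Prefer the address of the proxy that forwarded the request; fall back to
    # the socket peer. The client IP in X-Real-IP is deliberately ignored here.
    proxy_ip = headers.get("X-Forwarded-By", remote_addr)
    ip = ipaddress.ip_address(proxy_ip)
    return any(ip in cidr for cidr in PROXY_AUTH_WHITELIST)


# A proxy at 192.168.1.10 forwarded a request on behalf of a client at 203.0.113.7:
headers = {"X-Real-IP": "203.0.113.7", "X-Forwarded-By": "192.168.1.10"}
print(proxy_is_trusted(headers, "192.168.1.10"))   # True: the proxy is whitelisted
print(proxy_is_trusted({}, "203.0.113.7"))         # False: unknown peer, no trusted proxy
```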
gh_patches_debug_28883 | rasdani/github-patches | git_diff | python-discord__bot-219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It should be mandatory to add a reason for !bb watch
Leaving it optional means we don't always know why someone was watched. This is important information, so we should make it mandatory instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/bigbrother.py`
Content:
```
1 import asyncio
2 import logging
3 import re
4 from collections import defaultdict, deque
5 from typing import List, Union
6
7 from discord import Color, Embed, Guild, Member, Message, TextChannel, User
8 from discord.ext.commands import Bot, Context, group
9
10 from bot.constants import BigBrother as BigBrotherConfig, Channels, Emojis, Guild as GuildConfig, Keys, Roles, URLs
11 from bot.decorators import with_role
12 from bot.pagination import LinePaginator
13 from bot.utils import messages
14 from bot.utils.moderation import post_infraction
15
16 log = logging.getLogger(__name__)
17
18 URL_RE = re.compile(r"(https?://[^\s]+)")
19
20
21 class BigBrother:
22 """User monitoring to assist with moderation."""
23
24 HEADERS = {'X-API-Key': Keys.site_api}
25
26 def __init__(self, bot: Bot):
27 self.bot = bot
28 self.watched_users = {} # { user_id: log_channel_id }
29 self.channel_queues = defaultdict(lambda: defaultdict(deque)) # { user_id: { channel_id: queue(messages) }
30 self.last_log = [None, None, 0] # [user_id, channel_id, message_count]
31 self.consuming = False
32
33 self.bot.loop.create_task(self.get_watched_users())
34
35 def update_cache(self, api_response: List[dict]):
36 """
37 Updates the internal cache of watched users from the given `api_response`.
38 This function will only add (or update) existing keys, it will not delete
39 keys that were not present in the API response.
40 A user is only added if the bot can find a channel
41 with the given `channel_id` in its channel cache.
42 """
43
44 for entry in api_response:
45 user_id = int(entry['user_id'])
46 channel_id = int(entry['channel_id'])
47 channel = self.bot.get_channel(channel_id)
48
49 if channel is not None:
50 self.watched_users[user_id] = channel
51 else:
52 log.error(
53 f"Site specified to relay messages by `{user_id}` in `{channel_id}`, "
54 "but the given channel could not be found. Ignoring."
55 )
56
57 async def get_watched_users(self):
58 """Retrieves watched users from the API."""
59
60 await self.bot.wait_until_ready()
61 async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:
62 data = await response.json()
63 self.update_cache(data)
64
65 async def on_member_ban(self, guild: Guild, user: Union[User, Member]):
66 if guild.id == GuildConfig.id and user.id in self.watched_users:
67 url = f"{URLs.site_bigbrother_api}?user_id={user.id}"
68 channel = self.watched_users[user.id]
69
70 async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:
71 del self.watched_users[user.id]
72 del self.channel_queues[user.id]
73 if response.status == 204:
74 await channel.send(
75 f"{Emojis.bb_message}:hammer: {user} got banned, so "
76 f"`BigBrother` will no longer relay their messages to {channel}"
77 )
78
79 else:
80 data = await response.json()
81 reason = data.get('error_message', "no message provided")
82 await channel.send(
83 f"{Emojis.bb_message}:x: {user} got banned, but trying to remove them from"
84 f"BigBrother's user dictionary on the API returned an error: {reason}"
85 )
86
87 async def on_message(self, msg: Message):
88 """Queues up messages sent by watched users."""
89
90 if msg.author.id in self.watched_users:
91 if not self.consuming:
92 self.bot.loop.create_task(self.consume_messages())
93
94 log.trace(f"Received message: {msg.content} ({len(msg.attachments)} attachments)")
95 self.channel_queues[msg.author.id][msg.channel.id].append(msg)
96
97 async def consume_messages(self):
98 """Consumes the message queues to log watched users' messages."""
99
100 if not self.consuming:
101 self.consuming = True
102 log.trace("Sleeping before consuming...")
103 await asyncio.sleep(BigBrotherConfig.log_delay)
104
105 log.trace("Begin consuming messages.")
106 channel_queues = self.channel_queues.copy()
107 self.channel_queues.clear()
108 for user_id, queues in channel_queues.items():
109 for _, queue in queues.items():
110 channel = self.watched_users[user_id]
111 while queue:
112 msg = queue.popleft()
113 log.trace(f"Consuming message: {msg.clean_content} ({len(msg.attachments)} attachments)")
114
115 self.last_log[2] += 1 # Increment message count.
116 await self.send_header(msg, channel)
117 await self.log_message(msg, channel)
118
119 if self.channel_queues:
120 log.trace("Queue not empty; continue consumption.")
121 self.bot.loop.create_task(self.consume_messages())
122 else:
123 log.trace("Done consuming messages.")
124 self.consuming = False
125
126 async def send_header(self, message: Message, destination: TextChannel):
127 """
128 Sends a log message header to the given channel.
129
130 A header is only sent if the user or channel are different than the previous, or if the configured message
131 limit for a single header has been exceeded.
132
133 :param message: the first message in the queue
134 :param destination: the channel in which to send the header
135 """
136
137 last_user, last_channel, msg_count = self.last_log
138 limit = BigBrotherConfig.header_message_limit
139
140 # Send header if user/channel are different or if message limit exceeded.
141 if message.author.id != last_user or message.channel.id != last_channel or msg_count > limit:
142 self.last_log = [message.author.id, message.channel.id, 0]
143
144 embed = Embed(description=f"{message.author.mention} in [#{message.channel.name}]({message.jump_url})")
145 embed.set_author(name=message.author.nick or message.author.name, icon_url=message.author.avatar_url)
146 await destination.send(embed=embed)
147
148 @staticmethod
149 async def log_message(message: Message, destination: TextChannel):
150 """
151 Logs a watched user's message in the given channel.
152
153 Attachments are also sent. All non-image or non-video URLs are put in inline code blocks to prevent preview
154 embeds from being automatically generated.
155
156 :param message: the message to log
157 :param destination: the channel in which to log the message
158 """
159
160 content = message.clean_content
161 if content:
162 # Put all non-media URLs in inline code blocks.
163 media_urls = {embed.url for embed in message.embeds if embed.type in ("image", "video")}
164 for url in URL_RE.findall(content):
165 if url not in media_urls:
166 content = content.replace(url, f"`{url}`")
167
168 await destination.send(content)
169
170 await messages.send_attachments(message, destination)
171
172 @group(name='bigbrother', aliases=('bb',), invoke_without_command=True)
173 @with_role(Roles.owner, Roles.admin, Roles.moderator)
174 async def bigbrother_group(self, ctx: Context):
175 """Monitor users, NSA-style."""
176
177 await ctx.invoke(self.bot.get_command("help"), "bigbrother")
178
179 @bigbrother_group.command(name='watched', aliases=('all',))
180 @with_role(Roles.owner, Roles.admin, Roles.moderator)
181 async def watched_command(self, ctx: Context, from_cache: bool = True):
182 """
183 Shows all users that are currently monitored and in which channel.
184 By default, the users are returned from the cache.
185 If this is not desired, `from_cache` can be given as a falsy value, e.g. e.g. 'no'.
186 """
187
188 if from_cache:
189 lines = tuple(
190 f"• <@{user_id}> in <#{self.watched_users[user_id].id}>"
191 for user_id in self.watched_users
192 )
193 await LinePaginator.paginate(
194 lines or ("There's nothing here yet.",),
195 ctx,
196 Embed(title="Watched users (cached)", color=Color.blue()),
197 empty=False
198 )
199
200 else:
201 async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:
202 if response.status == 200:
203 data = await response.json()
204 self.update_cache(data)
205 lines = tuple(f"• <@{entry['user_id']}> in <#{entry['channel_id']}>" for entry in data)
206
207 await LinePaginator.paginate(
208 lines or ("There's nothing here yet.",),
209 ctx,
210 Embed(title="Watched users", color=Color.blue()),
211 empty=False
212 )
213
214 else:
215 await ctx.send(f":x: got non-200 response from the API")
216
217 @bigbrother_group.command(name='watch', aliases=('w',))
218 @with_role(Roles.owner, Roles.admin, Roles.moderator)
219 async def watch_command(self, ctx: Context, user: User, *, reason: str = None):
220 """
221 Relay messages sent by the given `user` to the `#big-brother-logs` channel
222
223 If a `reason` is specified, a note is added for `user`
224 """
225
226 channel_id = Channels.big_brother_logs
227
228 post_data = {
229 'user_id': str(user.id),
230 'channel_id': str(channel_id)
231 }
232
233 async with self.bot.http_session.post(
234 URLs.site_bigbrother_api,
235 headers=self.HEADERS,
236 json=post_data
237 ) as response:
238 if response.status == 204:
239 await ctx.send(f":ok_hand: will now relay messages sent by {user} in <#{channel_id}>")
240
241 channel = self.bot.get_channel(channel_id)
242 if channel is None:
243 log.error(
244 f"could not update internal cache, failed to find a channel with ID {channel_id}"
245 )
246 else:
247 self.watched_users[user.id] = channel
248
249 else:
250 data = await response.json()
251 reason = data.get('error_message', "no message provided")
252 await ctx.send(f":x: the API returned an error: {reason}")
253
254 # Add a note (shadow warning) if a reason is specified
255 if reason:
256 reason = "bb watch: " + reason # Prepend for situational awareness
257 await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
258
259 @bigbrother_group.command(name='unwatch', aliases=('uw',))
260 @with_role(Roles.owner, Roles.admin, Roles.moderator)
261 async def unwatch_command(self, ctx: Context, user: User):
262 """Stop relaying messages by the given `user`."""
263
264 url = f"{URLs.site_bigbrother_api}?user_id={user.id}"
265 async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:
266 if response.status == 204:
267 await ctx.send(f":ok_hand: will no longer relay messages sent by {user}")
268
269 if user.id in self.watched_users:
270 del self.watched_users[user.id]
271 if user.id in self.channel_queues:
272 del self.channel_queues[user.id]
273 else:
274 log.warning(f"user {user.id} was unwatched but was not found in the cache")
275
276 else:
277 data = await response.json()
278 reason = data.get('error_message', "no message provided")
279 await ctx.send(f":x: the API returned an error: {reason}")
280
281
282 def setup(bot: Bot):
283 bot.add_cog(BigBrother(bot))
284 log.info("Cog loaded: BigBrother")
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/bigbrother.py b/bot/cogs/bigbrother.py
--- a/bot/cogs/bigbrother.py
+++ b/bot/cogs/bigbrother.py
@@ -216,11 +216,12 @@
@bigbrother_group.command(name='watch', aliases=('w',))
@with_role(Roles.owner, Roles.admin, Roles.moderator)
- async def watch_command(self, ctx: Context, user: User, *, reason: str = None):
+ async def watch_command(self, ctx: Context, user: User, *, reason: str):
"""
Relay messages sent by the given `user` to the `#big-brother-logs` channel
- If a `reason` is specified, a note is added for `user`
+ A `reason` for watching is required, which is added for the user to be watched as a
+ note (aka: shadow warning)
"""
channel_id = Channels.big_brother_logs
@@ -251,10 +252,9 @@
reason = data.get('error_message', "no message provided")
await ctx.send(f":x: the API returned an error: {reason}")
- # Add a note (shadow warning) if a reason is specified
- if reason:
- reason = "bb watch: " + reason # Prepend for situational awareness
- await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
+ # Add a note (shadow warning) with the reason for watching
+ reason = "bb watch: " + reason # Prepend for situational awareness
+ await post_infraction(ctx, user, type="warning", reason=reason, hidden=True)
@bigbrother_group.command(name='unwatch', aliases=('uw',))
@with_role(Roles.owner, Roles.admin, Roles.moderator)
| {"golden_diff": "diff --git a/bot/cogs/bigbrother.py b/bot/cogs/bigbrother.py\n--- a/bot/cogs/bigbrother.py\n+++ b/bot/cogs/bigbrother.py\n@@ -216,11 +216,12 @@\n \n @bigbrother_group.command(name='watch', aliases=('w',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n- async def watch_command(self, ctx: Context, user: User, *, reason: str = None):\n+ async def watch_command(self, ctx: Context, user: User, *, reason: str):\n \"\"\"\n Relay messages sent by the given `user` to the `#big-brother-logs` channel\n \n- If a `reason` is specified, a note is added for `user`\n+ A `reason` for watching is required, which is added for the user to be watched as a\n+ note (aka: shadow warning)\n \"\"\"\n \n channel_id = Channels.big_brother_logs\n@@ -251,10 +252,9 @@\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n \n- # Add a note (shadow warning) if a reason is specified\n- if reason:\n- reason = \"bb watch: \" + reason # Prepend for situational awareness\n- await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n+ # Add a note (shadow warning) with the reason for watching\n+ reason = \"bb watch: \" + reason # Prepend for situational awareness\n+ await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n \n @bigbrother_group.command(name='unwatch', aliases=('uw',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n", "issue": "It should be mandatory to add a reason for !bb watch\nLeaving it optional means we don't always know why someone was watched. This is important information, so we should make it mandatory instead.\n", "before_files": [{"content": "import asyncio\nimport logging\nimport re\nfrom collections import defaultdict, deque\nfrom typing import List, Union\n\nfrom discord import Color, Embed, Guild, Member, Message, TextChannel, User\nfrom discord.ext.commands import Bot, Context, group\n\nfrom bot.constants import BigBrother as BigBrotherConfig, Channels, Emojis, Guild as GuildConfig, Keys, Roles, URLs\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\nfrom bot.utils.moderation import post_infraction\n\nlog = logging.getLogger(__name__)\n\nURL_RE = re.compile(r\"(https?://[^\\s]+)\")\n\n\nclass BigBrother:\n \"\"\"User monitoring to assist with moderation.\"\"\"\n\n HEADERS = {'X-API-Key': Keys.site_api}\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.watched_users = {} # { user_id: log_channel_id }\n self.channel_queues = defaultdict(lambda: defaultdict(deque)) # { user_id: { channel_id: queue(messages) }\n self.last_log = [None, None, 0] # [user_id, channel_id, message_count]\n self.consuming = False\n\n self.bot.loop.create_task(self.get_watched_users())\n\n def update_cache(self, api_response: List[dict]):\n \"\"\"\n Updates the internal cache of watched users from the given `api_response`.\n This function will only add (or update) existing keys, it will not delete\n keys that were not present in the API response.\n A user is only added if the bot can find a channel\n with the given `channel_id` in its channel cache.\n \"\"\"\n\n for entry in api_response:\n user_id = int(entry['user_id'])\n channel_id = int(entry['channel_id'])\n channel = self.bot.get_channel(channel_id)\n\n if channel is not None:\n self.watched_users[user_id] = channel\n else:\n log.error(\n f\"Site specified to relay messages by `{user_id}` in `{channel_id}`, \"\n \"but the given channel could not 
be found. Ignoring.\"\n )\n\n async def get_watched_users(self):\n \"\"\"Retrieves watched users from the API.\"\"\"\n\n await self.bot.wait_until_ready()\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n data = await response.json()\n self.update_cache(data)\n\n async def on_member_ban(self, guild: Guild, user: Union[User, Member]):\n if guild.id == GuildConfig.id and user.id in self.watched_users:\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n channel = self.watched_users[user.id]\n\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n del self.watched_users[user.id]\n del self.channel_queues[user.id]\n if response.status == 204:\n await channel.send(\n f\"{Emojis.bb_message}:hammer: {user} got banned, so \"\n f\"`BigBrother` will no longer relay their messages to {channel}\"\n )\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await channel.send(\n f\"{Emojis.bb_message}:x: {user} got banned, but trying to remove them from\"\n f\"BigBrother's user dictionary on the API returned an error: {reason}\"\n )\n\n async def on_message(self, msg: Message):\n \"\"\"Queues up messages sent by watched users.\"\"\"\n\n if msg.author.id in self.watched_users:\n if not self.consuming:\n self.bot.loop.create_task(self.consume_messages())\n\n log.trace(f\"Received message: {msg.content} ({len(msg.attachments)} attachments)\")\n self.channel_queues[msg.author.id][msg.channel.id].append(msg)\n\n async def consume_messages(self):\n \"\"\"Consumes the message queues to log watched users' messages.\"\"\"\n\n if not self.consuming:\n self.consuming = True\n log.trace(\"Sleeping before consuming...\")\n await asyncio.sleep(BigBrotherConfig.log_delay)\n\n log.trace(\"Begin consuming messages.\")\n channel_queues = self.channel_queues.copy()\n self.channel_queues.clear()\n for user_id, queues in channel_queues.items():\n for _, queue in queues.items():\n channel = self.watched_users[user_id]\n while queue:\n msg = queue.popleft()\n log.trace(f\"Consuming message: {msg.clean_content} ({len(msg.attachments)} attachments)\")\n\n self.last_log[2] += 1 # Increment message count.\n await self.send_header(msg, channel)\n await self.log_message(msg, channel)\n\n if self.channel_queues:\n log.trace(\"Queue not empty; continue consumption.\")\n self.bot.loop.create_task(self.consume_messages())\n else:\n log.trace(\"Done consuming messages.\")\n self.consuming = False\n\n async def send_header(self, message: Message, destination: TextChannel):\n \"\"\"\n Sends a log message header to the given channel.\n\n A header is only sent if the user or channel are different than the previous, or if the configured message\n limit for a single header has been exceeded.\n\n :param message: the first message in the queue\n :param destination: the channel in which to send the header\n \"\"\"\n\n last_user, last_channel, msg_count = self.last_log\n limit = BigBrotherConfig.header_message_limit\n\n # Send header if user/channel are different or if message limit exceeded.\n if message.author.id != last_user or message.channel.id != last_channel or msg_count > limit:\n self.last_log = [message.author.id, message.channel.id, 0]\n\n embed = Embed(description=f\"{message.author.mention} in [#{message.channel.name}]({message.jump_url})\")\n embed.set_author(name=message.author.nick or message.author.name, icon_url=message.author.avatar_url)\n await destination.send(embed=embed)\n\n 
@staticmethod\n async def log_message(message: Message, destination: TextChannel):\n \"\"\"\n Logs a watched user's message in the given channel.\n\n Attachments are also sent. All non-image or non-video URLs are put in inline code blocks to prevent preview\n embeds from being automatically generated.\n\n :param message: the message to log\n :param destination: the channel in which to log the message\n \"\"\"\n\n content = message.clean_content\n if content:\n # Put all non-media URLs in inline code blocks.\n media_urls = {embed.url for embed in message.embeds if embed.type in (\"image\", \"video\")}\n for url in URL_RE.findall(content):\n if url not in media_urls:\n content = content.replace(url, f\"`{url}`\")\n\n await destination.send(content)\n\n await messages.send_attachments(message, destination)\n\n @group(name='bigbrother', aliases=('bb',), invoke_without_command=True)\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def bigbrother_group(self, ctx: Context):\n \"\"\"Monitor users, NSA-style.\"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"bigbrother\")\n\n @bigbrother_group.command(name='watched', aliases=('all',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watched_command(self, ctx: Context, from_cache: bool = True):\n \"\"\"\n Shows all users that are currently monitored and in which channel.\n By default, the users are returned from the cache.\n If this is not desired, `from_cache` can be given as a falsy value, e.g. e.g. 'no'.\n \"\"\"\n\n if from_cache:\n lines = tuple(\n f\"\u2022 <@{user_id}> in <#{self.watched_users[user_id].id}>\"\n for user_id in self.watched_users\n )\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users (cached)\", color=Color.blue()),\n empty=False\n )\n\n else:\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n if response.status == 200:\n data = await response.json()\n self.update_cache(data)\n lines = tuple(f\"\u2022 <@{entry['user_id']}> in <#{entry['channel_id']}>\" for entry in data)\n\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users\", color=Color.blue()),\n empty=False\n )\n\n else:\n await ctx.send(f\":x: got non-200 response from the API\")\n\n @bigbrother_group.command(name='watch', aliases=('w',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watch_command(self, ctx: Context, user: User, *, reason: str = None):\n \"\"\"\n Relay messages sent by the given `user` to the `#big-brother-logs` channel\n\n If a `reason` is specified, a note is added for `user`\n \"\"\"\n\n channel_id = Channels.big_brother_logs\n\n post_data = {\n 'user_id': str(user.id),\n 'channel_id': str(channel_id)\n }\n\n async with self.bot.http_session.post(\n URLs.site_bigbrother_api,\n headers=self.HEADERS,\n json=post_data\n ) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will now relay messages sent by {user} in <#{channel_id}>\")\n\n channel = self.bot.get_channel(channel_id)\n if channel is None:\n log.error(\n f\"could not update internal cache, failed to find a channel with ID {channel_id}\"\n )\n else:\n self.watched_users[user.id] = channel\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n # Add a note (shadow warning) if a reason is specified\n if reason:\n reason = \"bb 
watch: \" + reason # Prepend for situational awareness\n await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n\n @bigbrother_group.command(name='unwatch', aliases=('uw',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def unwatch_command(self, ctx: Context, user: User):\n \"\"\"Stop relaying messages by the given `user`.\"\"\"\n\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will no longer relay messages sent by {user}\")\n\n if user.id in self.watched_users:\n del self.watched_users[user.id]\n if user.id in self.channel_queues:\n del self.channel_queues[user.id]\n else:\n log.warning(f\"user {user.id} was unwatched but was not found in the cache\")\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n\ndef setup(bot: Bot):\n bot.add_cog(BigBrother(bot))\n log.info(\"Cog loaded: BigBrother\")\n", "path": "bot/cogs/bigbrother.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport re\nfrom collections import defaultdict, deque\nfrom typing import List, Union\n\nfrom discord import Color, Embed, Guild, Member, Message, TextChannel, User\nfrom discord.ext.commands import Bot, Context, group\n\nfrom bot.constants import BigBrother as BigBrotherConfig, Channels, Emojis, Guild as GuildConfig, Keys, Roles, URLs\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\nfrom bot.utils.moderation import post_infraction\n\nlog = logging.getLogger(__name__)\n\nURL_RE = re.compile(r\"(https?://[^\\s]+)\")\n\n\nclass BigBrother:\n \"\"\"User monitoring to assist with moderation.\"\"\"\n\n HEADERS = {'X-API-Key': Keys.site_api}\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.watched_users = {} # { user_id: log_channel_id }\n self.channel_queues = defaultdict(lambda: defaultdict(deque)) # { user_id: { channel_id: queue(messages) }\n self.last_log = [None, None, 0] # [user_id, channel_id, message_count]\n self.consuming = False\n\n self.bot.loop.create_task(self.get_watched_users())\n\n def update_cache(self, api_response: List[dict]):\n \"\"\"\n Updates the internal cache of watched users from the given `api_response`.\n This function will only add (or update) existing keys, it will not delete\n keys that were not present in the API response.\n A user is only added if the bot can find a channel\n with the given `channel_id` in its channel cache.\n \"\"\"\n\n for entry in api_response:\n user_id = int(entry['user_id'])\n channel_id = int(entry['channel_id'])\n channel = self.bot.get_channel(channel_id)\n\n if channel is not None:\n self.watched_users[user_id] = channel\n else:\n log.error(\n f\"Site specified to relay messages by `{user_id}` in `{channel_id}`, \"\n \"but the given channel could not be found. 
Ignoring.\"\n )\n\n async def get_watched_users(self):\n \"\"\"Retrieves watched users from the API.\"\"\"\n\n await self.bot.wait_until_ready()\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n data = await response.json()\n self.update_cache(data)\n\n async def on_member_ban(self, guild: Guild, user: Union[User, Member]):\n if guild.id == GuildConfig.id and user.id in self.watched_users:\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n channel = self.watched_users[user.id]\n\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n del self.watched_users[user.id]\n del self.channel_queues[user.id]\n if response.status == 204:\n await channel.send(\n f\"{Emojis.bb_message}:hammer: {user} got banned, so \"\n f\"`BigBrother` will no longer relay their messages to {channel}\"\n )\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await channel.send(\n f\"{Emojis.bb_message}:x: {user} got banned, but trying to remove them from\"\n f\"BigBrother's user dictionary on the API returned an error: {reason}\"\n )\n\n async def on_message(self, msg: Message):\n \"\"\"Queues up messages sent by watched users.\"\"\"\n\n if msg.author.id in self.watched_users:\n if not self.consuming:\n self.bot.loop.create_task(self.consume_messages())\n\n log.trace(f\"Received message: {msg.content} ({len(msg.attachments)} attachments)\")\n self.channel_queues[msg.author.id][msg.channel.id].append(msg)\n\n async def consume_messages(self):\n \"\"\"Consumes the message queues to log watched users' messages.\"\"\"\n\n if not self.consuming:\n self.consuming = True\n log.trace(\"Sleeping before consuming...\")\n await asyncio.sleep(BigBrotherConfig.log_delay)\n\n log.trace(\"Begin consuming messages.\")\n channel_queues = self.channel_queues.copy()\n self.channel_queues.clear()\n for user_id, queues in channel_queues.items():\n for _, queue in queues.items():\n channel = self.watched_users[user_id]\n while queue:\n msg = queue.popleft()\n log.trace(f\"Consuming message: {msg.clean_content} ({len(msg.attachments)} attachments)\")\n\n self.last_log[2] += 1 # Increment message count.\n await self.send_header(msg, channel)\n await self.log_message(msg, channel)\n\n if self.channel_queues:\n log.trace(\"Queue not empty; continue consumption.\")\n self.bot.loop.create_task(self.consume_messages())\n else:\n log.trace(\"Done consuming messages.\")\n self.consuming = False\n\n async def send_header(self, message: Message, destination: TextChannel):\n \"\"\"\n Sends a log message header to the given channel.\n\n A header is only sent if the user or channel are different than the previous, or if the configured message\n limit for a single header has been exceeded.\n\n :param message: the first message in the queue\n :param destination: the channel in which to send the header\n \"\"\"\n\n last_user, last_channel, msg_count = self.last_log\n limit = BigBrotherConfig.header_message_limit\n\n # Send header if user/channel are different or if message limit exceeded.\n if message.author.id != last_user or message.channel.id != last_channel or msg_count > limit:\n self.last_log = [message.author.id, message.channel.id, 0]\n\n embed = Embed(description=f\"{message.author.mention} in [#{message.channel.name}]({message.jump_url})\")\n embed.set_author(name=message.author.nick or message.author.name, icon_url=message.author.avatar_url)\n await destination.send(embed=embed)\n\n @staticmethod\n async 
def log_message(message: Message, destination: TextChannel):\n \"\"\"\n Logs a watched user's message in the given channel.\n\n Attachments are also sent. All non-image or non-video URLs are put in inline code blocks to prevent preview\n embeds from being automatically generated.\n\n :param message: the message to log\n :param destination: the channel in which to log the message\n \"\"\"\n\n content = message.clean_content\n if content:\n # Put all non-media URLs in inline code blocks.\n media_urls = {embed.url for embed in message.embeds if embed.type in (\"image\", \"video\")}\n for url in URL_RE.findall(content):\n if url not in media_urls:\n content = content.replace(url, f\"`{url}`\")\n\n await destination.send(content)\n\n await messages.send_attachments(message, destination)\n\n @group(name='bigbrother', aliases=('bb',), invoke_without_command=True)\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def bigbrother_group(self, ctx: Context):\n \"\"\"Monitor users, NSA-style.\"\"\"\n\n await ctx.invoke(self.bot.get_command(\"help\"), \"bigbrother\")\n\n @bigbrother_group.command(name='watched', aliases=('all',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watched_command(self, ctx: Context, from_cache: bool = True):\n \"\"\"\n Shows all users that are currently monitored and in which channel.\n By default, the users are returned from the cache.\n If this is not desired, `from_cache` can be given as a falsy value, e.g. e.g. 'no'.\n \"\"\"\n\n if from_cache:\n lines = tuple(\n f\"\u2022 <@{user_id}> in <#{self.watched_users[user_id].id}>\"\n for user_id in self.watched_users\n )\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users (cached)\", color=Color.blue()),\n empty=False\n )\n\n else:\n async with self.bot.http_session.get(URLs.site_bigbrother_api, headers=self.HEADERS) as response:\n if response.status == 200:\n data = await response.json()\n self.update_cache(data)\n lines = tuple(f\"\u2022 <@{entry['user_id']}> in <#{entry['channel_id']}>\" for entry in data)\n\n await LinePaginator.paginate(\n lines or (\"There's nothing here yet.\",),\n ctx,\n Embed(title=\"Watched users\", color=Color.blue()),\n empty=False\n )\n\n else:\n await ctx.send(f\":x: got non-200 response from the API\")\n\n @bigbrother_group.command(name='watch', aliases=('w',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def watch_command(self, ctx: Context, user: User, *, reason: str):\n \"\"\"\n Relay messages sent by the given `user` to the `#big-brother-logs` channel\n\n A `reason` for watching is required, which is added for the user to be watched as a\n note (aka: shadow warning)\n \"\"\"\n\n channel_id = Channels.big_brother_logs\n\n post_data = {\n 'user_id': str(user.id),\n 'channel_id': str(channel_id)\n }\n\n async with self.bot.http_session.post(\n URLs.site_bigbrother_api,\n headers=self.HEADERS,\n json=post_data\n ) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will now relay messages sent by {user} in <#{channel_id}>\")\n\n channel = self.bot.get_channel(channel_id)\n if channel is None:\n log.error(\n f\"could not update internal cache, failed to find a channel with ID {channel_id}\"\n )\n else:\n self.watched_users[user.id] = channel\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n # Add a note (shadow warning) with the reason for 
watching\n reason = \"bb watch: \" + reason # Prepend for situational awareness\n await post_infraction(ctx, user, type=\"warning\", reason=reason, hidden=True)\n\n @bigbrother_group.command(name='unwatch', aliases=('uw',))\n @with_role(Roles.owner, Roles.admin, Roles.moderator)\n async def unwatch_command(self, ctx: Context, user: User):\n \"\"\"Stop relaying messages by the given `user`.\"\"\"\n\n url = f\"{URLs.site_bigbrother_api}?user_id={user.id}\"\n async with self.bot.http_session.delete(url, headers=self.HEADERS) as response:\n if response.status == 204:\n await ctx.send(f\":ok_hand: will no longer relay messages sent by {user}\")\n\n if user.id in self.watched_users:\n del self.watched_users[user.id]\n if user.id in self.channel_queues:\n del self.channel_queues[user.id]\n else:\n log.warning(f\"user {user.id} was unwatched but was not found in the cache\")\n\n else:\n data = await response.json()\n reason = data.get('error_message', \"no message provided\")\n await ctx.send(f\":x: the API returned an error: {reason}\")\n\n\ndef setup(bot: Bot):\n bot.add_cog(BigBrother(bot))\n log.info(\"Cog loaded: BigBrother\")\n", "path": "bot/cogs/bigbrother.py"}]} | 3,581 | 424 |
gh_patches_debug_38915 | rasdani/github-patches | git_diff | lisa-lab__pylearn2-1512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
speed up NanGuardMode on GPU and move to Theano.
This can be done as in gh-1054. Do the reduction on the GPU, then this will transfer much less data.
The CudaNdarray object do not support many reduction, but we can compile a Theano function that take a gpu object, do the reduction and return the result on the CPU to inspect it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylearn2/devtools/nan_guard.py`
Content:
```
1 """
2 Functionality for detecting NaNs in a Theano graph.
3 """
4 __authors__ = "Ian Goodfellow"
5 __copyright__ = "Copyright 2010-2012, Universite de Montreal"
6 __credits__ = ["Ian Goodfellow"]
7 __license__ = "3-clause BSD"
8 __maintainer__ = "LISA Lab"
9 __email__ = "pylearn-dev@googlegroups"
10
11 import logging
12 from theano.compile import Mode
13 import theano
14 import numpy as np
15 from pylearn2.models.dbm import flatten
16 from pylearn2.utils import contains_nan, contains_inf
17
18
19 logger = logging.getLogger(__name__)
20
21
22 class NanGuardMode(Mode):
23 """
24 A Theano compilation Mode that makes the compiled function automatically
25 detect NaNs and Infs and detect an error if they occur.
26
27 Parameters
28 ----------
29 nan_is_error : bool
30 If True, raise an error anytime a NaN is encountered
31 inf_is_error: bool
32 If True, raise an error anytime an Inf is encountered. Note that some
33 pylearn2 modules currently use np.inf as a default value (e.g.
34 mlp.max_pool) and these will cause an error if inf_is_error is True.
35 big_is_error: bool
36 If True, raise an error when a value greater than 1e10 is encountered.
37 """
38 def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
39 def do_check_on(var, nd, f, is_input):
40 """
41 Checks `var` for NaNs / Infs. If detected, raises an exception
42 and / or prints information about `nd`, `f`, and `is_input` to
43 help the user determine the cause of the invalid values.
44
45 Parameters
46 ----------
47 var : numpy.ndarray
48 The value to be checked.
49 nd : theano.gof.Apply
50 The Apply node being executed
51 f : callable
52 The thunk for the apply node
53 is_input : bool
54 If True, `var` is an input to `nd`.
55 If False, it is an output.
56 """
57 error = False
58 if nan_is_error:
59 if contains_nan(var):
60 logger.error('NaN detected')
61 error = True
62 if inf_is_error:
63 if contains_inf(var):
64 logger.error('Inf detected')
65 error = True
66 if big_is_error:
67 if np.abs(var).max() > 1e10:
68 logger.error('Big value detected')
69 error = True
70 if error:
71 if is_input:
72 logger.error('In an input')
73 else:
74 logger.error('In an output')
75 logger.error('Inputs: ')
76 for ivar, ival in zip(nd.inputs, f.inputs):
77 logger.error('var')
78 logger.error(ivar)
79 logger.error(theano.printing.min_informative_str(ivar))
80 logger.error('val')
81 logger.error(ival)
82 logger.error('Node:')
83 logger.error(nd)
84 assert False
85
86 def nan_check(i, node, fn):
87 """
88 Runs `fn` while checking its inputs and outputs for NaNs / Infs
89
90 Parameters
91 ----------
92 i : currently ignored (TODO: determine why it is here or remove)
93 node : theano.gof.Apply
94 The Apply node currently being executed
95 fn : callable
96 The thunk to execute for this Apply node
97 """
98 inputs = fn.inputs
99 # TODO: figure out why individual inputs are themselves lists sometimes
100 for x in flatten(inputs):
101 do_check_on(x, node, fn, True)
102 fn()
103 outputs = fn.outputs
104 for j, x in enumerate(flatten(outputs)):
105 do_check_on(x, node, fn, False)
106
107 wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])
108 super(NanGuardMode, self).__init__(wrap_linker, optimizer=theano.config.optimizer)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pylearn2/devtools/nan_guard.py b/pylearn2/devtools/nan_guard.py
--- a/pylearn2/devtools/nan_guard.py
+++ b/pylearn2/devtools/nan_guard.py
@@ -11,6 +11,8 @@
import logging
from theano.compile import Mode
import theano
+import theano.tensor as T
+import theano.sandbox.cuda as cuda
import numpy as np
from pylearn2.models.dbm import flatten
from pylearn2.utils import contains_nan, contains_inf
@@ -36,6 +38,23 @@
If True, raise an error when a value greater than 1e10 is encountered.
"""
def __init__(self, nan_is_error, inf_is_error, big_is_error=True):
+ if cuda.cuda_available:
+ self.guard_input = cuda.fvector('nan_guard')
+ if nan_is_error or inf_is_error:
+ self.gpumin = theano.function(
+ [self.guard_input], T.min(self.guard_input),
+ mode='FAST_RUN'
+ )
+ if inf_is_error:
+ self.gpumax = theano.function(
+ [self.guard_input], T.max(self.guard_input),
+ mode='FAST_RUN'
+ )
+ if big_is_error:
+ self.gpuabsmax = theano.function(
+ [self.guard_input], T.max(T.abs_(self.guard_input)),
+ mode='FAST_RUN'
+ )
def do_check_on(var, nd, f, is_input):
"""
Checks `var` for NaNs / Infs. If detected, raises an exception
@@ -56,15 +75,31 @@
"""
error = False
if nan_is_error:
- if contains_nan(var):
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = np.isnan(self.gpumin(var.reshape(var.size)))
+ else:
+ err = contains_nan(var)
+ if err:
logger.error('NaN detected')
error = True
if inf_is_error:
- if contains_inf(var):
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = (np.isinf(self.gpumin(var.reshape(var.size))) or \
+ np.isinf(self.gpumax(var.reshape(var.size))))
+ else:
+ err = contains_inf(var)
+ if err:
logger.error('Inf detected')
error = True
if big_is_error:
- if np.abs(var).max() > 1e10:
+ err = False
+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):
+ err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)
+ else:
+ err = (np.abs(var).max() > 1e10)
+ if err:
logger.error('Big value detected')
error = True
if error:
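A short usage sketch of the patched mode (the toy graph and inputs are illustrative): passing a `NanGuardMode` instance as the compilation `mode` is what routes every Apply node's inputs and outputs through the checks added above.
```python
import numpy as np
import theano
import theano.tensor as T
from pylearn2.devtools.nan_guard import NanGuardMode

x = T.matrix('x')
f = theano.function(
    [x], T.log(x).sum(),
    mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True),
)

f(np.ones((2, 2), dtype=theano.config.floatX))   # fine
f(np.zeros((2, 2), dtype=theano.config.floatX))  # log(0) -> -inf, trips the Inf check
```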
| {"golden_diff": "diff --git a/pylearn2/devtools/nan_guard.py b/pylearn2/devtools/nan_guard.py\n--- a/pylearn2/devtools/nan_guard.py\n+++ b/pylearn2/devtools/nan_guard.py\n@@ -11,6 +11,8 @@\n import logging\n from theano.compile import Mode\n import theano\n+import theano.tensor as T\n+import theano.sandbox.cuda as cuda\n import numpy as np\n from pylearn2.models.dbm import flatten\n from pylearn2.utils import contains_nan, contains_inf\n@@ -36,6 +38,23 @@\n If True, raise an error when a value greater than 1e10 is encountered.\n \"\"\"\n def __init__(self, nan_is_error, inf_is_error, big_is_error=True):\n+ if cuda.cuda_available:\n+ self.guard_input = cuda.fvector('nan_guard')\n+ if nan_is_error or inf_is_error:\n+ self.gpumin = theano.function(\n+ [self.guard_input], T.min(self.guard_input),\n+ mode='FAST_RUN'\n+ )\n+ if inf_is_error:\n+ self.gpumax = theano.function(\n+ [self.guard_input], T.max(self.guard_input),\n+ mode='FAST_RUN'\n+ )\n+ if big_is_error:\n+ self.gpuabsmax = theano.function(\n+ [self.guard_input], T.max(T.abs_(self.guard_input)),\n+ mode='FAST_RUN'\n+ )\n def do_check_on(var, nd, f, is_input):\n \"\"\"\n Checks `var` for NaNs / Infs. If detected, raises an exception\n@@ -56,15 +75,31 @@\n \"\"\"\n error = False\n if nan_is_error:\n- if contains_nan(var):\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = np.isnan(self.gpumin(var.reshape(var.size)))\n+ else:\n+ err = contains_nan(var)\n+ if err:\n logger.error('NaN detected')\n error = True\n if inf_is_error:\n- if contains_inf(var):\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = (np.isinf(self.gpumin(var.reshape(var.size))) or \\\n+ np.isinf(self.gpumax(var.reshape(var.size))))\n+ else:\n+ err = contains_inf(var)\n+ if err:\n logger.error('Inf detected')\n error = True\n if big_is_error:\n- if np.abs(var).max() > 1e10:\n+ err = False\n+ if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n+ err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)\n+ else:\n+ err = (np.abs(var).max() > 1e10)\n+ if err:\n logger.error('Big value detected')\n error = True\n if error:\n", "issue": "speed up NanGuardMode on GPU and move to Theano.\nThis can be done as in gh-1054. Do the reduction on the GPU, then this will transfer much less data.\n\nThe CudaNdarray object do not support many reduction, but we can compile a Theano function that take a gpu object, do the reduction and return the result on the CPU to inspect it.\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for detecting NaNs in a Theano graph.\n\"\"\"\n__authors__ = \"Ian Goodfellow\"\n__copyright__ = \"Copyright 2010-2012, Universite de Montreal\"\n__credits__ = [\"Ian Goodfellow\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"LISA Lab\"\n__email__ = \"pylearn-dev@googlegroups\"\n\nimport logging\nfrom theano.compile import Mode\nimport theano\nimport numpy as np\nfrom pylearn2.models.dbm import flatten\nfrom pylearn2.utils import contains_nan, contains_inf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NanGuardMode(Mode):\n \"\"\"\n A Theano compilation Mode that makes the compiled function automatically\n detect NaNs and Infs and detect an error if they occur.\n\n Parameters\n ----------\n nan_is_error : bool\n If True, raise an error anytime a NaN is encountered\n inf_is_error: bool\n If True, raise an error anytime an Inf is encountered. 
Note that some\n pylearn2 modules currently use np.inf as a default value (e.g.\n mlp.max_pool) and these will cause an error if inf_is_error is True.\n big_is_error: bool\n If True, raise an error when a value greater than 1e10 is encountered.\n \"\"\"\n def __init__(self, nan_is_error, inf_is_error, big_is_error=True):\n def do_check_on(var, nd, f, is_input):\n \"\"\"\n Checks `var` for NaNs / Infs. If detected, raises an exception\n and / or prints information about `nd`, `f`, and `is_input` to\n help the user determine the cause of the invalid values.\n\n Parameters\n ----------\n var : numpy.ndarray\n The value to be checked.\n nd : theano.gof.Apply\n The Apply node being executed\n f : callable\n The thunk for the apply node\n is_input : bool\n If True, `var` is an input to `nd`.\n If False, it is an output.\n \"\"\"\n error = False\n if nan_is_error:\n if contains_nan(var):\n logger.error('NaN detected')\n error = True\n if inf_is_error:\n if contains_inf(var):\n logger.error('Inf detected')\n error = True\n if big_is_error:\n if np.abs(var).max() > 1e10:\n logger.error('Big value detected')\n error = True\n if error:\n if is_input:\n logger.error('In an input')\n else:\n logger.error('In an output')\n logger.error('Inputs: ')\n for ivar, ival in zip(nd.inputs, f.inputs):\n logger.error('var')\n logger.error(ivar)\n logger.error(theano.printing.min_informative_str(ivar))\n logger.error('val')\n logger.error(ival)\n logger.error('Node:')\n logger.error(nd)\n assert False\n\n def nan_check(i, node, fn):\n \"\"\"\n Runs `fn` while checking its inputs and outputs for NaNs / Infs\n\n Parameters\n ----------\n i : currently ignored (TODO: determine why it is here or remove)\n node : theano.gof.Apply\n The Apply node currently being executed\n fn : callable\n The thunk to execute for this Apply node\n \"\"\"\n inputs = fn.inputs\n # TODO: figure out why individual inputs are themselves lists sometimes\n for x in flatten(inputs):\n do_check_on(x, node, fn, True)\n fn()\n outputs = fn.outputs\n for j, x in enumerate(flatten(outputs)):\n do_check_on(x, node, fn, False)\n\n wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])\n super(NanGuardMode, self).__init__(wrap_linker, optimizer=theano.config.optimizer)\n", "path": "pylearn2/devtools/nan_guard.py"}], "after_files": [{"content": "\"\"\"\nFunctionality for detecting NaNs in a Theano graph.\n\"\"\"\n__authors__ = \"Ian Goodfellow\"\n__copyright__ = \"Copyright 2010-2012, Universite de Montreal\"\n__credits__ = [\"Ian Goodfellow\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"LISA Lab\"\n__email__ = \"pylearn-dev@googlegroups\"\n\nimport logging\nfrom theano.compile import Mode\nimport theano\nimport theano.tensor as T\nimport theano.sandbox.cuda as cuda\nimport numpy as np\nfrom pylearn2.models.dbm import flatten\nfrom pylearn2.utils import contains_nan, contains_inf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass NanGuardMode(Mode):\n \"\"\"\n A Theano compilation Mode that makes the compiled function automatically\n detect NaNs and Infs and detect an error if they occur.\n\n Parameters\n ----------\n nan_is_error : bool\n If True, raise an error anytime a NaN is encountered\n inf_is_error: bool\n If True, raise an error anytime an Inf is encountered. 
Note that some\n pylearn2 modules currently use np.inf as a default value (e.g.\n mlp.max_pool) and these will cause an error if inf_is_error is True.\n big_is_error: bool\n If True, raise an error when a value greater than 1e10 is encountered.\n \"\"\"\n def __init__(self, nan_is_error, inf_is_error, big_is_error=True):\n if cuda.cuda_available:\n self.guard_input = cuda.fvector('nan_guard')\n if nan_is_error or inf_is_error:\n self.gpumin = theano.function(\n [self.guard_input], T.min(self.guard_input),\n mode='FAST_RUN'\n )\n if inf_is_error:\n self.gpumax = theano.function(\n [self.guard_input], T.max(self.guard_input),\n mode='FAST_RUN'\n )\n if big_is_error:\n self.gpuabsmax = theano.function(\n [self.guard_input], T.max(T.abs_(self.guard_input)),\n mode='FAST_RUN'\n )\n def do_check_on(var, nd, f, is_input):\n \"\"\"\n Checks `var` for NaNs / Infs. If detected, raises an exception\n and / or prints information about `nd`, `f`, and `is_input` to\n help the user determine the cause of the invalid values.\n\n Parameters\n ----------\n var : numpy.ndarray\n The value to be checked.\n nd : theano.gof.Apply\n The Apply node being executed\n f : callable\n The thunk for the apply node\n is_input : bool\n If True, `var` is an input to `nd`.\n If False, it is an output.\n \"\"\"\n error = False\n if nan_is_error:\n err = False\n if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n err = np.isnan(self.gpumin(var.reshape(var.size)))\n else:\n err = contains_nan(var)\n if err:\n logger.error('NaN detected')\n error = True\n if inf_is_error:\n err = False\n if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n err = (np.isinf(self.gpumin(var.reshape(var.size))) or \\\n np.isinf(self.gpumax(var.reshape(var.size))))\n else:\n err = contains_inf(var)\n if err:\n logger.error('Inf detected')\n error = True\n if big_is_error:\n err = False\n if cuda.cuda_available and isinstance(var, cuda.CudaNdarray):\n err = (self.gpuabsmax(var.reshape(var.size)) > 1e10)\n else:\n err = (np.abs(var).max() > 1e10)\n if err:\n logger.error('Big value detected')\n error = True\n if error:\n if is_input:\n logger.error('In an input')\n else:\n logger.error('In an output')\n logger.error('Inputs: ')\n for ivar, ival in zip(nd.inputs, f.inputs):\n logger.error('var')\n logger.error(ivar)\n logger.error(theano.printing.min_informative_str(ivar))\n logger.error('val')\n logger.error(ival)\n logger.error('Node:')\n logger.error(nd)\n assert False\n\n def nan_check(i, node, fn):\n \"\"\"\n Runs `fn` while checking its inputs and outputs for NaNs / Infs\n\n Parameters\n ----------\n i : currently ignored (TODO: determine why it is here or remove)\n node : theano.gof.Apply\n The Apply node currently being executed\n fn : callable\n The thunk to execute for this Apply node\n \"\"\"\n inputs = fn.inputs\n # TODO: figure out why individual inputs are themselves lists sometimes\n for x in flatten(inputs):\n do_check_on(x, node, fn, True)\n fn()\n outputs = fn.outputs\n for j, x in enumerate(flatten(outputs)):\n do_check_on(x, node, fn, False)\n\n wrap_linker = theano.gof.WrapLinkerMany([theano.gof.OpWiseCLinker()], [nan_check])\n super(NanGuardMode, self).__init__(wrap_linker, optimizer=theano.config.optimizer)\n", "path": "pylearn2/devtools/nan_guard.py"}]} | 1,424 | 666 |
gh_patches_debug_23459 | rasdani/github-patches | git_diff | cython__cython-5819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instructions on how to install Cython without compilation in project description are outdated
### Describe your issue
The project description displayed on PyPI reports that Cython can be installed avoiding compilation running
```
pip install Cython --install-option="--no-cython-compile"
```
however, `pip` since version does not accept the `--install-option` anymore.
Even when it did, I believe that the option name was `--install-options`.
The documentation reports the same instructions https://cython.readthedocs.io/en/latest/src/quickstart/install.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 try:
3 from setuptools import setup, Extension
4 except ImportError:
5 from distutils.core import setup, Extension
6 import os
7 import stat
8 import subprocess
9 import textwrap
10 import sys
11
12 import platform
13 is_cpython = platform.python_implementation() == 'CPython'
14
15 # this specifies which versions of python we support, pip >= 9 knows to skip
16 # versions of packages which are not compatible with the running python
17 PYTHON_REQUIRES = '>=3.7'
18
19 if sys.platform == "darwin":
20 # Don't create resource files on OS X tar.
21 os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
22 os.environ['COPYFILE_DISABLE'] = 'true'
23
24 setup_args = {}
25
26 def add_command_class(name, cls):
27 cmdclasses = setup_args.get('cmdclass', {})
28 cmdclasses[name] = cls
29 setup_args['cmdclass'] = cmdclasses
30
31 from distutils.command.sdist import sdist as sdist_orig
32 class sdist(sdist_orig):
33 def run(self):
34 self.force_manifest = 1
35 if (sys.platform != "win32" and
36 os.path.isdir('.git')):
37 assert os.system("git rev-parse --verify HEAD > .gitrev") == 0
38 sdist_orig.run(self)
39 add_command_class('sdist', sdist)
40
41 pxd_include_dirs = [
42 directory for directory, dirs, files
43 in os.walk(os.path.join('Cython', 'Includes'))
44 if '__init__.pyx' in files or '__init__.pxd' in files
45 or directory == os.path.join('Cython', 'Includes')]
46
47 pxd_include_patterns = [
48 p+'/*.pxd' for p in pxd_include_dirs ] + [
49 p+'/*.pyx' for p in pxd_include_dirs ]
50
51 setup_args['package_data'] = {
52 'Cython.Plex' : ['*.pxd'],
53 'Cython.Compiler' : ['*.pxd'],
54 'Cython.Runtime' : ['*.pyx', '*.pxd'],
55 'Cython.Utility' : ['*.pyx', '*.pxd', '*.c', '*.h', '*.cpp'],
56 'Cython' : [ p[7:] for p in pxd_include_patterns ],
57 'Cython.Debugger.Tests': ['codefile', 'cfuncs.c'],
58 }
59
60 # This dict is used for passing extra arguments that are setuptools
61 # specific to setup
62 setuptools_extra_args = {}
63
64 if 'setuptools' in sys.modules:
65 setuptools_extra_args['python_requires'] = PYTHON_REQUIRES
66 setuptools_extra_args['zip_safe'] = False
67 setuptools_extra_args['entry_points'] = {
68 'console_scripts': [
69 'cython = Cython.Compiler.Main:setuptools_main',
70 'cythonize = Cython.Build.Cythonize:main',
71 'cygdb = Cython.Debugger.Cygdb:main',
72 ]
73 }
74 scripts = []
75 else:
76 if os.name == "posix":
77 scripts = ["bin/cython", "bin/cythonize", "bin/cygdb"]
78 else:
79 scripts = ["cython.py", "cythonize.py", "cygdb.py"]
80
81
82 def compile_cython_modules(profile=False, coverage=False, compile_minimal=False, compile_more=False, cython_with_refnanny=False):
83 source_root = os.path.abspath(os.path.dirname(__file__))
84 compiled_modules = [
85 "Cython.Plex.Actions",
86 "Cython.Plex.Scanners",
87 "Cython.Compiler.FlowControl",
88 "Cython.Compiler.Scanning",
89 "Cython.Compiler.Visitor",
90 "Cython.Runtime.refnanny",
91 ]
92 if not compile_minimal:
93 compiled_modules.extend([
94 "Cython.Plex.Machines",
95 "Cython.Plex.Transitions",
96 "Cython.Plex.DFA",
97 "Cython.Compiler.Code",
98 "Cython.Compiler.FusedNode",
99 "Cython.Compiler.Parsing",
100 "Cython.Tempita._tempita",
101 "Cython.StringIOTree",
102 "Cython.Utils",
103 ])
104 if compile_more and not compile_minimal:
105 compiled_modules.extend([
106 "Cython.Compiler.Lexicon",
107 "Cython.Compiler.Pythran",
108 "Cython.Build.Dependencies",
109 "Cython.Compiler.ParseTreeTransforms",
110 "Cython.Compiler.Nodes",
111 "Cython.Compiler.ExprNodes",
112 "Cython.Compiler.ModuleNode",
113 "Cython.Compiler.Optimize",
114 ])
115
116 from distutils.spawn import find_executable
117 from distutils.sysconfig import get_python_inc
118 pgen = find_executable(
119 'pgen', os.pathsep.join([os.environ['PATH'], os.path.join(get_python_inc(), '..', 'Parser')]))
120 if not pgen:
121 sys.stderr.write("Unable to find pgen, not compiling formal grammar.\n")
122 else:
123 parser_dir = os.path.join(os.path.dirname(__file__), 'Cython', 'Parser')
124 grammar = os.path.join(parser_dir, 'Grammar')
125 subprocess.check_call([
126 pgen,
127 os.path.join(grammar),
128 os.path.join(parser_dir, 'graminit.h'),
129 os.path.join(parser_dir, 'graminit.c'),
130 ])
131 cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')
132 if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:
133 mtime = os.stat(grammar)[stat.ST_MTIME]
134 os.utime(cst_pyx, (mtime, mtime))
135 compiled_modules.extend([
136 "Cython.Parser.ConcreteSyntaxTree",
137 ])
138
139 defines = []
140 if cython_with_refnanny:
141 defines.append(('CYTHON_REFNANNY', '1'))
142 if coverage:
143 defines.append(('CYTHON_TRACE', '1'))
144
145 extensions = []
146 for module in compiled_modules:
147 source_file = os.path.join(source_root, *module.split('.'))
148 pyx_source_file = source_file + ".py"
149 if not os.path.exists(pyx_source_file):
150 pyx_source_file += "x" # .py -> .pyx
151
152 dep_files = []
153 if os.path.exists(source_file + '.pxd'):
154 dep_files.append(source_file + '.pxd')
155
156 extensions.append(Extension(
157 module, sources=[pyx_source_file],
158 define_macros=defines if '.refnanny' not in module else [],
159 depends=dep_files))
160 # XXX hack around setuptools quirk for '*.pyx' sources
161 extensions[-1].sources[0] = pyx_source_file
162
163 # optimise build parallelism by starting with the largest modules
164 extensions.sort(key=lambda ext: os.path.getsize(ext.sources[0]), reverse=True)
165
166 from Cython.Distutils.build_ext import build_ext
167 from Cython.Compiler.Options import get_directive_defaults
168 get_directive_defaults().update(
169 language_level=2,
170 binding=False,
171 always_allow_keywords=False,
172 autotestdict=False,
173 )
174 if profile:
175 get_directive_defaults()['profile'] = True
176 sys.stderr.write("Enabled profiling for the Cython binary modules\n")
177 if coverage:
178 get_directive_defaults()['linetrace'] = True
179 sys.stderr.write("Enabled line tracing and profiling for the Cython binary modules\n")
180
181 # not using cythonize() directly to let distutils decide whether building extensions was requested
182 add_command_class("build_ext", build_ext)
183 setup_args['ext_modules'] = extensions
184
185
186 def check_option(name):
187 cli_arg = "--" + name
188 if cli_arg in sys.argv:
189 sys.argv.remove(cli_arg)
190 return True
191
192 env_var = name.replace("-", "_").upper()
193 if os.environ.get(env_var) == "true":
194 return True
195
196 return False
197
198
199 cython_profile = check_option('cython-profile')
200 cython_coverage = check_option('cython-coverage')
201 cython_with_refnanny = check_option('cython-with-refnanny')
202
203 compile_cython_itself = not check_option('no-cython-compile')
204 if compile_cython_itself:
205 cython_compile_more = check_option('cython-compile-all')
206 cython_compile_minimal = check_option('cython-compile-minimal')
207
208 setup_args.update(setuptools_extra_args)
209
210
211 def dev_status(version: str):
212 if 'b' in version or 'c' in version:
213 # 1b1, 1beta1, 2rc1, ...
214 return 'Development Status :: 4 - Beta'
215 elif 'a' in version:
216 # 1a1, 1alpha1, ...
217 return 'Development Status :: 3 - Alpha'
218 else:
219 return 'Development Status :: 5 - Production/Stable'
220
221
222 packages = [
223 'Cython',
224 'Cython.Build',
225 'Cython.Compiler',
226 'Cython.Runtime',
227 'Cython.Distutils',
228 'Cython.Debugger',
229 'Cython.Debugger.Tests',
230 'Cython.Plex',
231 'Cython.Tests',
232 'Cython.Build.Tests',
233 'Cython.Compiler.Tests',
234 'Cython.Utility',
235 'Cython.Tempita',
236 'pyximport',
237 ]
238
239
240 def run_build():
241 if compile_cython_itself and (is_cpython or cython_compile_more or cython_compile_minimal):
242 compile_cython_modules(cython_profile, cython_coverage, cython_compile_minimal, cython_compile_more, cython_with_refnanny)
243
244 from Cython import __version__ as version
245 setup(
246 name='Cython',
247 version=version,
248 url='https://cython.org/',
249 author='Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',
250 author_email='[email protected]',
251 description="The Cython compiler for writing C extensions in the Python language.",
252 long_description=textwrap.dedent("""\
253 The Cython language makes writing C extensions for the Python language as
254 easy as Python itself. Cython is a source code translator based on Pyrex_,
255 but supports more cutting edge functionality and optimizations.
256
257 The Cython language is a superset of the Python language (almost all Python
258 code is also valid Cython code), but Cython additionally supports optional
259 static typing to natively call C functions, operate with C++ classes and
260 declare fast C types on variables and class attributes. This allows the
261 compiler to generate very efficient C code from Cython code.
262
263 This makes Cython the ideal language for writing glue code for external
264 C/C++ libraries, and for fast C modules that speed up the execution of
265 Python code.
266
267 Note that for one-time builds, e.g. for CI/testing, on platforms that are not
268 covered by one of the wheel packages provided on PyPI *and* the pure Python wheel
269 that we provide is not used, it is substantially faster than a full source build
270 to install an uncompiled (slower) version of Cython with::
271
272 pip install Cython --install-option="--no-cython-compile"
273
274 .. _Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
275 """),
276 license='Apache-2.0',
277 classifiers=[
278 dev_status(version),
279 "Intended Audience :: Developers",
280 "License :: OSI Approved :: Apache Software License",
281 "Operating System :: OS Independent",
282 "Programming Language :: Python",
283 "Programming Language :: Python :: 3",
284 "Programming Language :: Python :: 3.7",
285 "Programming Language :: Python :: 3.8",
286 "Programming Language :: Python :: 3.9",
287 "Programming Language :: Python :: 3.10",
288 "Programming Language :: Python :: 3.11",
289 "Programming Language :: Python :: 3.12",
290 "Programming Language :: Python :: Implementation :: CPython",
291 "Programming Language :: Python :: Implementation :: PyPy",
292 "Programming Language :: C",
293 "Programming Language :: Cython",
294 "Topic :: Software Development :: Code Generators",
295 "Topic :: Software Development :: Compilers",
296 "Topic :: Software Development :: Libraries :: Python Modules"
297 ],
298 project_urls={
299 "Documentation": "https://cython.readthedocs.io/",
300 "Donate": "https://cython.readthedocs.io/en/latest/src/donating.html",
301 "Source Code": "https://github.com/cython/cython",
302 "Bug Tracker": "https://github.com/cython/cython/issues",
303 "User Group": "https://groups.google.com/g/cython-users",
304 },
305
306 scripts=scripts,
307 packages=packages,
308 py_modules=["cython"],
309 **setup_args
310 )
311
312
313 if __name__ == '__main__':
314 run_build()
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -263,13 +263,18 @@
This makes Cython the ideal language for writing glue code for external
C/C++ libraries, and for fast C modules that speed up the execution of
Python code.
-
+
+ The newest Cython release can always be downloaded from https://cython.org/.
+ Unpack the tarball or zip file, enter the directory, and then run::
+
+ pip install .
+
Note that for one-time builds, e.g. for CI/testing, on platforms that are not
covered by one of the wheel packages provided on PyPI *and* the pure Python wheel
that we provide is not used, it is substantially faster than a full source build
to install an uncompiled (slower) version of Cython with::
- pip install Cython --install-option="--no-cython-compile"
+ NO_CYTHON_COMPILE=true pip install .
.. _Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
"""),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -263,13 +263,18 @@\n This makes Cython the ideal language for writing glue code for external\n C/C++ libraries, and for fast C modules that speed up the execution of\n Python code.\n-\n+ \n+ The newest Cython release can always be downloaded from https://cython.org/. \n+ Unpack the tarball or zip file, enter the directory, and then run::\n+ \n+ pip install .\n+ \n Note that for one-time builds, e.g. for CI/testing, on platforms that are not\n covered by one of the wheel packages provided on PyPI *and* the pure Python wheel\n that we provide is not used, it is substantially faster than a full source build\n to install an uncompiled (slower) version of Cython with::\n \n- pip install Cython --install-option=\"--no-cython-compile\"\n+ NO_CYTHON_COMPILE=true pip install .\n \n .. _Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/\n \"\"\"),\n", "issue": "Instructions on how to install Cython without compilation in project description are outdated\n### Describe your issue\n\nThe project description displayed on PyPI reports that Cython can be installed avoiding compilation running\r\n```\r\npip install Cython --install-option=\"--no-cython-compile\"\r\n```\r\nhowever, `pip` since version does not accept the `--install-option` anymore.\r\n\r\nEven when it did, I believe that the option name was `--install-options`.\r\n\r\nThe documentation reports the same instructions https://cython.readthedocs.io/en/latest/src/quickstart/install.html\n", "before_files": [{"content": "#!/usr/bin/env python\ntry:\n from setuptools import setup, Extension\nexcept ImportError:\n from distutils.core import setup, Extension\nimport os\nimport stat\nimport subprocess\nimport textwrap\nimport sys\n\nimport platform\nis_cpython = platform.python_implementation() == 'CPython'\n\n# this specifies which versions of python we support, pip >= 9 knows to skip\n# versions of packages which are not compatible with the running python\nPYTHON_REQUIRES = '>=3.7'\n\nif sys.platform == \"darwin\":\n # Don't create resource files on OS X tar.\n os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'\n os.environ['COPYFILE_DISABLE'] = 'true'\n\nsetup_args = {}\n\ndef add_command_class(name, cls):\n cmdclasses = setup_args.get('cmdclass', {})\n cmdclasses[name] = cls\n setup_args['cmdclass'] = cmdclasses\n\nfrom distutils.command.sdist import sdist as sdist_orig\nclass sdist(sdist_orig):\n def run(self):\n self.force_manifest = 1\n if (sys.platform != \"win32\" and\n os.path.isdir('.git')):\n assert os.system(\"git rev-parse --verify HEAD > .gitrev\") == 0\n sdist_orig.run(self)\nadd_command_class('sdist', sdist)\n\npxd_include_dirs = [\n directory for directory, dirs, files\n in os.walk(os.path.join('Cython', 'Includes'))\n if '__init__.pyx' in files or '__init__.pxd' in files\n or directory == os.path.join('Cython', 'Includes')]\n\npxd_include_patterns = [\n p+'/*.pxd' for p in pxd_include_dirs ] + [\n p+'/*.pyx' for p in pxd_include_dirs ]\n\nsetup_args['package_data'] = {\n 'Cython.Plex' : ['*.pxd'],\n 'Cython.Compiler' : ['*.pxd'],\n 'Cython.Runtime' : ['*.pyx', '*.pxd'],\n 'Cython.Utility' : ['*.pyx', '*.pxd', '*.c', '*.h', '*.cpp'],\n 'Cython' : [ p[7:] for p in pxd_include_patterns ],\n 'Cython.Debugger.Tests': ['codefile', 'cfuncs.c'],\n}\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\nif 'setuptools' in sys.modules:\n 
setuptools_extra_args['python_requires'] = PYTHON_REQUIRES\n setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': [\n 'cython = Cython.Compiler.Main:setuptools_main',\n 'cythonize = Cython.Build.Cythonize:main',\n 'cygdb = Cython.Debugger.Cygdb:main',\n ]\n }\n scripts = []\nelse:\n if os.name == \"posix\":\n scripts = [\"bin/cython\", \"bin/cythonize\", \"bin/cygdb\"]\n else:\n scripts = [\"cython.py\", \"cythonize.py\", \"cygdb.py\"]\n\n\ndef compile_cython_modules(profile=False, coverage=False, compile_minimal=False, compile_more=False, cython_with_refnanny=False):\n source_root = os.path.abspath(os.path.dirname(__file__))\n compiled_modules = [\n \"Cython.Plex.Actions\",\n \"Cython.Plex.Scanners\",\n \"Cython.Compiler.FlowControl\",\n \"Cython.Compiler.Scanning\",\n \"Cython.Compiler.Visitor\",\n \"Cython.Runtime.refnanny\",\n ]\n if not compile_minimal:\n compiled_modules.extend([\n \"Cython.Plex.Machines\",\n \"Cython.Plex.Transitions\",\n \"Cython.Plex.DFA\",\n \"Cython.Compiler.Code\",\n \"Cython.Compiler.FusedNode\",\n \"Cython.Compiler.Parsing\",\n \"Cython.Tempita._tempita\",\n \"Cython.StringIOTree\",\n \"Cython.Utils\",\n ])\n if compile_more and not compile_minimal:\n compiled_modules.extend([\n \"Cython.Compiler.Lexicon\",\n \"Cython.Compiler.Pythran\",\n \"Cython.Build.Dependencies\",\n \"Cython.Compiler.ParseTreeTransforms\",\n \"Cython.Compiler.Nodes\",\n \"Cython.Compiler.ExprNodes\",\n \"Cython.Compiler.ModuleNode\",\n \"Cython.Compiler.Optimize\",\n ])\n\n from distutils.spawn import find_executable\n from distutils.sysconfig import get_python_inc\n pgen = find_executable(\n 'pgen', os.pathsep.join([os.environ['PATH'], os.path.join(get_python_inc(), '..', 'Parser')]))\n if not pgen:\n sys.stderr.write(\"Unable to find pgen, not compiling formal grammar.\\n\")\n else:\n parser_dir = os.path.join(os.path.dirname(__file__), 'Cython', 'Parser')\n grammar = os.path.join(parser_dir, 'Grammar')\n subprocess.check_call([\n pgen,\n os.path.join(grammar),\n os.path.join(parser_dir, 'graminit.h'),\n os.path.join(parser_dir, 'graminit.c'),\n ])\n cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')\n if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:\n mtime = os.stat(grammar)[stat.ST_MTIME]\n os.utime(cst_pyx, (mtime, mtime))\n compiled_modules.extend([\n \"Cython.Parser.ConcreteSyntaxTree\",\n ])\n\n defines = []\n if cython_with_refnanny:\n defines.append(('CYTHON_REFNANNY', '1'))\n if coverage:\n defines.append(('CYTHON_TRACE', '1'))\n\n extensions = []\n for module in compiled_modules:\n source_file = os.path.join(source_root, *module.split('.'))\n pyx_source_file = source_file + \".py\"\n if not os.path.exists(pyx_source_file):\n pyx_source_file += \"x\" # .py -> .pyx\n\n dep_files = []\n if os.path.exists(source_file + '.pxd'):\n dep_files.append(source_file + '.pxd')\n\n extensions.append(Extension(\n module, sources=[pyx_source_file],\n define_macros=defines if '.refnanny' not in module else [],\n depends=dep_files))\n # XXX hack around setuptools quirk for '*.pyx' sources\n extensions[-1].sources[0] = pyx_source_file\n\n # optimise build parallelism by starting with the largest modules\n extensions.sort(key=lambda ext: os.path.getsize(ext.sources[0]), reverse=True)\n\n from Cython.Distutils.build_ext import build_ext\n from Cython.Compiler.Options import get_directive_defaults\n get_directive_defaults().update(\n language_level=2,\n binding=False,\n always_allow_keywords=False,\n 
autotestdict=False,\n )\n if profile:\n get_directive_defaults()['profile'] = True\n sys.stderr.write(\"Enabled profiling for the Cython binary modules\\n\")\n if coverage:\n get_directive_defaults()['linetrace'] = True\n sys.stderr.write(\"Enabled line tracing and profiling for the Cython binary modules\\n\")\n\n # not using cythonize() directly to let distutils decide whether building extensions was requested\n add_command_class(\"build_ext\", build_ext)\n setup_args['ext_modules'] = extensions\n\n\ndef check_option(name):\n cli_arg = \"--\" + name\n if cli_arg in sys.argv:\n sys.argv.remove(cli_arg)\n return True\n\n env_var = name.replace(\"-\", \"_\").upper()\n if os.environ.get(env_var) == \"true\":\n return True\n\n return False\n\n\ncython_profile = check_option('cython-profile')\ncython_coverage = check_option('cython-coverage')\ncython_with_refnanny = check_option('cython-with-refnanny')\n\ncompile_cython_itself = not check_option('no-cython-compile')\nif compile_cython_itself:\n cython_compile_more = check_option('cython-compile-all')\n cython_compile_minimal = check_option('cython-compile-minimal')\n\nsetup_args.update(setuptools_extra_args)\n\n\ndef dev_status(version: str):\n if 'b' in version or 'c' in version:\n # 1b1, 1beta1, 2rc1, ...\n return 'Development Status :: 4 - Beta'\n elif 'a' in version:\n # 1a1, 1alpha1, ...\n return 'Development Status :: 3 - Alpha'\n else:\n return 'Development Status :: 5 - Production/Stable'\n\n\npackages = [\n 'Cython',\n 'Cython.Build',\n 'Cython.Compiler',\n 'Cython.Runtime',\n 'Cython.Distutils',\n 'Cython.Debugger',\n 'Cython.Debugger.Tests',\n 'Cython.Plex',\n 'Cython.Tests',\n 'Cython.Build.Tests',\n 'Cython.Compiler.Tests',\n 'Cython.Utility',\n 'Cython.Tempita',\n 'pyximport',\n]\n\n\ndef run_build():\n if compile_cython_itself and (is_cpython or cython_compile_more or cython_compile_minimal):\n compile_cython_modules(cython_profile, cython_coverage, cython_compile_minimal, cython_compile_more, cython_with_refnanny)\n\n from Cython import __version__ as version\n setup(\n name='Cython',\n version=version,\n url='https://cython.org/',\n author='Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',\n author_email='[email protected]',\n description=\"The Cython compiler for writing C extensions in the Python language.\",\n long_description=textwrap.dedent(\"\"\"\\\n The Cython language makes writing C extensions for the Python language as\n easy as Python itself. Cython is a source code translator based on Pyrex_,\n but supports more cutting edge functionality and optimizations.\n\n The Cython language is a superset of the Python language (almost all Python\n code is also valid Cython code), but Cython additionally supports optional\n static typing to natively call C functions, operate with C++ classes and\n declare fast C types on variables and class attributes. This allows the\n compiler to generate very efficient C code from Cython code.\n\n This makes Cython the ideal language for writing glue code for external\n C/C++ libraries, and for fast C modules that speed up the execution of\n Python code.\n\n Note that for one-time builds, e.g. for CI/testing, on platforms that are not\n covered by one of the wheel packages provided on PyPI *and* the pure Python wheel\n that we provide is not used, it is substantially faster than a full source build\n to install an uncompiled (slower) version of Cython with::\n\n pip install Cython --install-option=\"--no-cython-compile\"\n\n .. 
_Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/\n \"\"\"),\n license='Apache-2.0',\n classifiers=[\n dev_status(version),\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: C\",\n \"Programming Language :: Cython\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"\n ],\n project_urls={\n \"Documentation\": \"https://cython.readthedocs.io/\",\n \"Donate\": \"https://cython.readthedocs.io/en/latest/src/donating.html\",\n \"Source Code\": \"https://github.com/cython/cython\",\n \"Bug Tracker\": \"https://github.com/cython/cython/issues\",\n \"User Group\": \"https://groups.google.com/g/cython-users\",\n },\n\n scripts=scripts,\n packages=packages,\n py_modules=[\"cython\"],\n **setup_args\n )\n\n\nif __name__ == '__main__':\n run_build()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\ntry:\n from setuptools import setup, Extension\nexcept ImportError:\n from distutils.core import setup, Extension\nimport os\nimport stat\nimport subprocess\nimport textwrap\nimport sys\n\nimport platform\nis_cpython = platform.python_implementation() == 'CPython'\n\n# this specifies which versions of python we support, pip >= 9 knows to skip\n# versions of packages which are not compatible with the running python\nPYTHON_REQUIRES = '>=3.7'\n\nif sys.platform == \"darwin\":\n # Don't create resource files on OS X tar.\n os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'\n os.environ['COPYFILE_DISABLE'] = 'true'\n\nsetup_args = {}\n\ndef add_command_class(name, cls):\n cmdclasses = setup_args.get('cmdclass', {})\n cmdclasses[name] = cls\n setup_args['cmdclass'] = cmdclasses\n\nfrom distutils.command.sdist import sdist as sdist_orig\nclass sdist(sdist_orig):\n def run(self):\n self.force_manifest = 1\n if (sys.platform != \"win32\" and\n os.path.isdir('.git')):\n assert os.system(\"git rev-parse --verify HEAD > .gitrev\") == 0\n sdist_orig.run(self)\nadd_command_class('sdist', sdist)\n\npxd_include_dirs = [\n directory for directory, dirs, files\n in os.walk(os.path.join('Cython', 'Includes'))\n if '__init__.pyx' in files or '__init__.pxd' in files\n or directory == os.path.join('Cython', 'Includes')]\n\npxd_include_patterns = [\n p+'/*.pxd' for p in pxd_include_dirs ] + [\n p+'/*.pyx' for p in pxd_include_dirs ]\n\nsetup_args['package_data'] = {\n 'Cython.Plex' : ['*.pxd'],\n 'Cython.Compiler' : ['*.pxd'],\n 'Cython.Runtime' : ['*.pyx', '*.pxd'],\n 'Cython.Utility' : ['*.pyx', '*.pxd', '*.c', '*.h', '*.cpp'],\n 'Cython' : [ p[7:] for p in pxd_include_patterns ],\n 'Cython.Debugger.Tests': ['codefile', 'cfuncs.c'],\n}\n\n# This dict is used for passing extra arguments that are setuptools\n# specific to setup\nsetuptools_extra_args = {}\n\nif 'setuptools' in sys.modules:\n setuptools_extra_args['python_requires'] = PYTHON_REQUIRES\n 
setuptools_extra_args['zip_safe'] = False\n setuptools_extra_args['entry_points'] = {\n 'console_scripts': [\n 'cython = Cython.Compiler.Main:setuptools_main',\n 'cythonize = Cython.Build.Cythonize:main',\n 'cygdb = Cython.Debugger.Cygdb:main',\n ]\n }\n scripts = []\nelse:\n if os.name == \"posix\":\n scripts = [\"bin/cython\", \"bin/cythonize\", \"bin/cygdb\"]\n else:\n scripts = [\"cython.py\", \"cythonize.py\", \"cygdb.py\"]\n\n\ndef compile_cython_modules(profile=False, coverage=False, compile_minimal=False, compile_more=False, cython_with_refnanny=False):\n source_root = os.path.abspath(os.path.dirname(__file__))\n compiled_modules = [\n \"Cython.Plex.Actions\",\n \"Cython.Plex.Scanners\",\n \"Cython.Compiler.FlowControl\",\n \"Cython.Compiler.Scanning\",\n \"Cython.Compiler.Visitor\",\n \"Cython.Runtime.refnanny\",\n ]\n if not compile_minimal:\n compiled_modules.extend([\n \"Cython.Plex.Machines\",\n \"Cython.Plex.Transitions\",\n \"Cython.Plex.DFA\",\n \"Cython.Compiler.Code\",\n \"Cython.Compiler.FusedNode\",\n \"Cython.Compiler.Parsing\",\n \"Cython.Tempita._tempita\",\n \"Cython.StringIOTree\",\n \"Cython.Utils\",\n ])\n if compile_more and not compile_minimal:\n compiled_modules.extend([\n \"Cython.Compiler.Lexicon\",\n \"Cython.Compiler.Pythran\",\n \"Cython.Build.Dependencies\",\n \"Cython.Compiler.ParseTreeTransforms\",\n \"Cython.Compiler.Nodes\",\n \"Cython.Compiler.ExprNodes\",\n \"Cython.Compiler.ModuleNode\",\n \"Cython.Compiler.Optimize\",\n ])\n\n from distutils.spawn import find_executable\n from distutils.sysconfig import get_python_inc\n pgen = find_executable(\n 'pgen', os.pathsep.join([os.environ['PATH'], os.path.join(get_python_inc(), '..', 'Parser')]))\n if not pgen:\n sys.stderr.write(\"Unable to find pgen, not compiling formal grammar.\\n\")\n else:\n parser_dir = os.path.join(os.path.dirname(__file__), 'Cython', 'Parser')\n grammar = os.path.join(parser_dir, 'Grammar')\n subprocess.check_call([\n pgen,\n os.path.join(grammar),\n os.path.join(parser_dir, 'graminit.h'),\n os.path.join(parser_dir, 'graminit.c'),\n ])\n cst_pyx = os.path.join(parser_dir, 'ConcreteSyntaxTree.pyx')\n if os.stat(grammar)[stat.ST_MTIME] > os.stat(cst_pyx)[stat.ST_MTIME]:\n mtime = os.stat(grammar)[stat.ST_MTIME]\n os.utime(cst_pyx, (mtime, mtime))\n compiled_modules.extend([\n \"Cython.Parser.ConcreteSyntaxTree\",\n ])\n\n defines = []\n if cython_with_refnanny:\n defines.append(('CYTHON_REFNANNY', '1'))\n if coverage:\n defines.append(('CYTHON_TRACE', '1'))\n\n extensions = []\n for module in compiled_modules:\n source_file = os.path.join(source_root, *module.split('.'))\n pyx_source_file = source_file + \".py\"\n if not os.path.exists(pyx_source_file):\n pyx_source_file += \"x\" # .py -> .pyx\n\n dep_files = []\n if os.path.exists(source_file + '.pxd'):\n dep_files.append(source_file + '.pxd')\n\n extensions.append(Extension(\n module, sources=[pyx_source_file],\n define_macros=defines if '.refnanny' not in module else [],\n depends=dep_files))\n # XXX hack around setuptools quirk for '*.pyx' sources\n extensions[-1].sources[0] = pyx_source_file\n\n # optimise build parallelism by starting with the largest modules\n extensions.sort(key=lambda ext: os.path.getsize(ext.sources[0]), reverse=True)\n\n from Cython.Distutils.build_ext import build_ext\n from Cython.Compiler.Options import get_directive_defaults\n get_directive_defaults().update(\n language_level=2,\n binding=False,\n always_allow_keywords=False,\n autotestdict=False,\n )\n if profile:\n 
get_directive_defaults()['profile'] = True\n sys.stderr.write(\"Enabled profiling for the Cython binary modules\\n\")\n if coverage:\n get_directive_defaults()['linetrace'] = True\n sys.stderr.write(\"Enabled line tracing and profiling for the Cython binary modules\\n\")\n\n # not using cythonize() directly to let distutils decide whether building extensions was requested\n add_command_class(\"build_ext\", build_ext)\n setup_args['ext_modules'] = extensions\n\n\ndef check_option(name):\n cli_arg = \"--\" + name\n if cli_arg in sys.argv:\n sys.argv.remove(cli_arg)\n return True\n\n env_var = name.replace(\"-\", \"_\").upper()\n if os.environ.get(env_var) == \"true\":\n return True\n\n return False\n\n\ncython_profile = check_option('cython-profile')\ncython_coverage = check_option('cython-coverage')\ncython_with_refnanny = check_option('cython-with-refnanny')\n\ncompile_cython_itself = not check_option('no-cython-compile')\nif compile_cython_itself:\n cython_compile_more = check_option('cython-compile-all')\n cython_compile_minimal = check_option('cython-compile-minimal')\n\nsetup_args.update(setuptools_extra_args)\n\n\ndef dev_status(version: str):\n if 'b' in version or 'c' in version:\n # 1b1, 1beta1, 2rc1, ...\n return 'Development Status :: 4 - Beta'\n elif 'a' in version:\n # 1a1, 1alpha1, ...\n return 'Development Status :: 3 - Alpha'\n else:\n return 'Development Status :: 5 - Production/Stable'\n\n\npackages = [\n 'Cython',\n 'Cython.Build',\n 'Cython.Compiler',\n 'Cython.Runtime',\n 'Cython.Distutils',\n 'Cython.Debugger',\n 'Cython.Debugger.Tests',\n 'Cython.Plex',\n 'Cython.Tests',\n 'Cython.Build.Tests',\n 'Cython.Compiler.Tests',\n 'Cython.Utility',\n 'Cython.Tempita',\n 'pyximport',\n]\n\n\ndef run_build():\n if compile_cython_itself and (is_cpython or cython_compile_more or cython_compile_minimal):\n compile_cython_modules(cython_profile, cython_coverage, cython_compile_minimal, cython_compile_more, cython_with_refnanny)\n\n from Cython import __version__ as version\n setup(\n name='Cython',\n version=version,\n url='https://cython.org/',\n author='Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.',\n author_email='[email protected]',\n description=\"The Cython compiler for writing C extensions in the Python language.\",\n long_description=textwrap.dedent(\"\"\"\\\n The Cython language makes writing C extensions for the Python language as\n easy as Python itself. Cython is a source code translator based on Pyrex_,\n but supports more cutting edge functionality and optimizations.\n\n The Cython language is a superset of the Python language (almost all Python\n code is also valid Cython code), but Cython additionally supports optional\n static typing to natively call C functions, operate with C++ classes and\n declare fast C types on variables and class attributes. This allows the\n compiler to generate very efficient C code from Cython code.\n\n This makes Cython the ideal language for writing glue code for external\n C/C++ libraries, and for fast C modules that speed up the execution of\n Python code.\n \n The newest Cython release can always be downloaded from https://cython.org/. \n Unpack the tarball or zip file, enter the directory, and then run::\n \n pip install .\n \n Note that for one-time builds, e.g. 
for CI/testing, on platforms that are not\n covered by one of the wheel packages provided on PyPI *and* the pure Python wheel\n that we provide is not used, it is substantially faster than a full source build\n to install an uncompiled (slower) version of Cython with::\n\n NO_CYTHON_COMPILE=true pip install .\n\n .. _Pyrex: https://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/\n \"\"\"),\n license='Apache-2.0',\n classifiers=[\n dev_status(version),\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: C\",\n \"Programming Language :: Cython\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"\n ],\n project_urls={\n \"Documentation\": \"https://cython.readthedocs.io/\",\n \"Donate\": \"https://cython.readthedocs.io/en/latest/src/donating.html\",\n \"Source Code\": \"https://github.com/cython/cython\",\n \"Bug Tracker\": \"https://github.com/cython/cython/issues\",\n \"User Group\": \"https://groups.google.com/g/cython-users\",\n },\n\n scripts=scripts,\n packages=packages,\n py_modules=[\"cython\"],\n **setup_args\n )\n\n\nif __name__ == '__main__':\n run_build()\n", "path": "setup.py"}]} | 3,963 | 254 |
gh_patches_debug_3494 | rasdani/github-patches | git_diff | wagtail__wagtail-10939 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Getting a "cannot pickle" error in Wagtail 5.1 when when using a custom image model
<!--
Found a bug? Please fill out the sections below. 👍
-->
### Issue Summary
I recently upgraded four small sites to Wagtail 5.1. None of these sites have ever used caching, and each site uses a custom image model. On all but one site I started getting the error: ` cannot pickle '_io.BufferedReader' object` every place an `{% image ... %}` tag is used. All of these sites have been working fine for several years. (As an aside, I see that in `Image.get_rendition()`, a `try...except` was removed in 5.1 that would have probably been masking any caching-related errors that might have occurred before 5.1.) The pickling error occurs both when caching is absent and when caching (Redis) is used. The `'_io.BufferedReader' object` error occurs in Django's `locmem.py` caching when no cache is specified in the settings, and a different pickling error message occurs when Redis is used, but in both cases pickling is the problem.
To test from the command line, I inserted a print statement in `locmem.py` to print out the pickled value. I then looped through the images calling `get_rendition()`. All pickled values printed correctly. To test a specific image from the command line, I uploaded a new image to the `logo_admin` field described below and did:
```
r=CustomRendition.objects.get(pk=63)
r.image.get_rendition(r.image_spec)
```
The pickled value was printed correctly. However, when this exact same image/rendition was called from an `{% image ... %}` tag in the admin as described below, the pickling error happens. The `CustomImage(AbstractImage)/CustomRendition(AbstractRendition)` definitions are the same in each of these projects and are set up [as described in the docs](https://docs.wagtail.org/en/latest/advanced_topics/images/custom_image_model.html#custom-image-models).
The easiest way to test is from the admin as described above and in **Steps to Reproduce** below. I also tried:
- deleting all existing renditions
- uploading a new image to the `logo_admin` field shown below. Again, when I comment out the `{% image settings.main.SiteSettings.logo_admin width-150 %}` line shown below, the admin comes up fine. When I uncomment it, the error happens again. The error happens with both `png` and `jpg` images.
### Main Question
How could the exact same rendition be pickled fine when called from the admin but encounter a pickling error when called from an `{% image ... %}` tag??
### Steps to Reproduce
Use a `CustomImage` model and set up the following `SiteSettings` model:
```
@register_setting
class SiteSettings(BaseSiteSetting):
logo_admin = models.OneToOneField(settings.WAGTAILIMAGES_IMAGE_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
```
Render the following from the `base.html` template in `wagtailadmin`:
```
{% extends "wagtailadmin/base.html" %}
{% load wagtailimages_tags %}
{% block branding_logo %}
{% image settings.main.SiteSettings.logo_admin width-150 %}
{% endblock %}
```
- I have NOT confirmed that this issue can be reproduced as described on a fresh Wagtail project.
### Technical details
- Python version: 3.8
- Django version: 4.2.5
- Wagtail version: 5.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/contrib/settings/models.py`
Content:
```
1 from django.db import models
2 from django.utils.functional import cached_property
3 from django.utils.translation import gettext as _
4
5 from wagtail.coreutils import InvokeViaAttributeShortcut
6 from wagtail.models import Site
7
8 from .registry import register_setting
9
10 __all__ = [
11 "BaseGenericSetting",
12 "BaseSiteSetting",
13 "register_setting",
14 ]
15
16
17 class AbstractSetting(models.Model):
18 """
19 The abstract base model for settings. Subclasses must be registered using
20 :func:`~wagtail.contrib.settings.registry.register_setting`
21 """
22
23 class Meta:
24 abstract = True
25
26 # Override to fetch ForeignKey values in the same query when
27 # retrieving settings (e.g. via `for_request()`)
28 select_related = None
29
30 @classmethod
31 def base_queryset(cls):
32 """
33 Returns a queryset of objects of this type to use as a base.
34
35 You can use the `select_related` attribute on your class to
36 specify a list of foreign key field names, which the method
37 will attempt to select additional related-object data for
38 when the query is executed.
39
40 If your needs are more complex than this, you can override
41 this method on your custom class.
42 """
43 queryset = cls.objects.all()
44 if cls.select_related is not None:
45 queryset = queryset.select_related(*cls.select_related)
46 return queryset
47
48 @classmethod
49 def get_cache_attr_name(cls):
50 """
51 Returns the name of the attribute that should be used to store
52 a reference to the fetched/created object on a request.
53 """
54 return f"_{cls._meta.app_label}.{cls._meta.model_name}".lower()
55
56 def __init__(self, *args, **kwargs):
57 super().__init__(*args, **kwargs)
58 # Per-instance page URL cache
59 self._page_url_cache = {}
60
61 @cached_property
62 def page_url(self):
63 # Allows get_page_url() to be invoked using
64 # `obj.page_url.foreign_key_name` syntax
65 return InvokeViaAttributeShortcut(self, "get_page_url")
66
67 def get_page_url(self, attribute_name, request=None):
68 """
69 Returns the URL of a page referenced by a foreign key
70 (or other attribute) matching the name ``attribute_name``.
71 If the field value is null, or links to something other
72 than a ``Page`` object, an empty string is returned.
73 The result is also cached per-object to facilitate
74 fast repeat access.
75
76 Raises an ``AttributeError`` if the object has no such
77 field or attribute.
78 """
79 if attribute_name in self._page_url_cache:
80 return self._page_url_cache[attribute_name]
81
82 if not hasattr(self, attribute_name):
83 raise AttributeError(
84 "'{}' object has no attribute '{}'".format(
85 self.__class__.__name__, attribute_name
86 )
87 )
88
89 page = getattr(self, attribute_name)
90
91 if hasattr(page, "specific"):
92 url = page.specific.get_url(getattr(self, "_request", None))
93 else:
94 url = ""
95
96 self._page_url_cache[attribute_name] = url
97 return url
98
99 def __getstate__(self):
100 # Ignore 'page_url' when pickling
101 state = super().__getstate__()
102 state.pop("page_url", None)
103 return state
104
105
106 class BaseSiteSetting(AbstractSetting):
107 site = models.OneToOneField(
108 Site,
109 unique=True,
110 db_index=True,
111 editable=False,
112 on_delete=models.CASCADE,
113 )
114
115 class Meta:
116 abstract = True
117
118 @classmethod
119 def for_request(cls, request):
120 """
121 Get or create an instance of this model for the request,
122 and cache the result on the request for faster repeat access.
123 """
124 attr_name = cls.get_cache_attr_name()
125 if hasattr(request, attr_name):
126 return getattr(request, attr_name)
127 site = Site.find_for_request(request)
128 site_settings = cls.for_site(site)
129 # to allow more efficient page url generation
130 site_settings._request = request
131 setattr(request, attr_name, site_settings)
132 return site_settings
133
134 @classmethod
135 def for_site(cls, site):
136 """
137 Get or create an instance of this setting for the site.
138 """
139 queryset = cls.base_queryset()
140 instance, created = queryset.get_or_create(site=site)
141 return instance
142
143 def __str__(self):
144 return _("%(site_setting)s for %(site)s") % {
145 "site_setting": self._meta.verbose_name,
146 "site": self.site,
147 }
148
149
150 class BaseGenericSetting(AbstractSetting):
151 """
152 Generic settings are singleton models - only one instance of each model
153 can be created.
154 """
155
156 class Meta:
157 abstract = True
158
159 @classmethod
160 def _get_or_create(cls):
161 """
162 Internal convenience method to get or create the first instance.
163
164 We cannot hardcode `pk=1`, for example, as not all database backends
165 use sequential IDs (e.g. Postgres).
166 """
167
168 first_obj = cls.base_queryset().first()
169 if first_obj is None:
170 return cls.objects.create()
171 return first_obj
172
173 @classmethod
174 def load(cls, request_or_site=None):
175 """
176 Get or create an instance of this model. There is only ever one
177 instance of models inheriting from `AbstractSetting` so we can
178 use `pk=1`.
179
180 If `request_or_site` is present and is a request object, then we cache
181 the result on the request for faster repeat access.
182 """
183
184 # We can only cache on the request, so if there is no request then
185 # we know there's nothing in the cache.
186 if request_or_site is None or isinstance(request_or_site, Site):
187 return cls._get_or_create()
188
189 # Check if we already have this in the cache and return it if so.
190 attr_name = cls.get_cache_attr_name()
191 if hasattr(request_or_site, attr_name):
192 return getattr(request_or_site, attr_name)
193
194 obj = cls._get_or_create()
195
196 # Cache for next time.
197 setattr(request_or_site, attr_name, obj)
198
199 return obj
200
201 def __str__(self):
202 return str(self._meta.verbose_name)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/contrib/settings/models.py b/wagtail/contrib/settings/models.py
--- a/wagtail/contrib/settings/models.py
+++ b/wagtail/contrib/settings/models.py
@@ -131,6 +131,12 @@
setattr(request, attr_name, site_settings)
return site_settings
+ def __getstate__(self):
+ # Leave out _request from the pickled state
+ state = super().__getstate__()
+ state.pop("_request", None)
+ return state
+
@classmethod
def for_site(cls, site):
"""
| {"golden_diff": "diff --git a/wagtail/contrib/settings/models.py b/wagtail/contrib/settings/models.py\n--- a/wagtail/contrib/settings/models.py\n+++ b/wagtail/contrib/settings/models.py\n@@ -131,6 +131,12 @@\n setattr(request, attr_name, site_settings)\n return site_settings\n \n+ def __getstate__(self):\n+ # Leave out _request from the pickled state\n+ state = super().__getstate__()\n+ state.pop(\"_request\", None)\n+ return state\n+\n @classmethod\n def for_site(cls, site):\n \"\"\"\n", "issue": "Getting a \"cannot pickle\" error in Wagtail 5.1 when when using a custom image model\n<!--\r\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\n\r\nI recently upgraded four small sites to Wagtail 5.1. None of these sites have ever used caching, and each site uses a custom image model. On all but one site I started getting the error: ` cannot pickle '_io.BufferedReader' object` every place an `{% image ... %}` tag is used. All of these sites have been working fine for several years. (As an aside, I see that in `Image.get_rendition()`, a `try...except` was removed in 5.1 that would have probably been masking any caching-related errors that might have occurred before 5.1.) The pickling error occurs both when caching is absent and when caching (Redis) is used. The `'_io.BufferedReader' object` error occurs in Django's `locmem.py` caching when no cache is specified in the settings, and a different pickling error message occurs when Redis is used, but in both cases pickling is the problem.\r\n\r\nTo test from the command line, I inserted a print statement in `locmem.py` to print out the pickled value. I then looped through the images calling `get_rendition()`. All pickled values printed correctly. To test a specific image from the command line, I uploaded a new image to the `logo_admin` field described below and did:\r\n```\r\nr=CustomRendition.objects.get(pk=63)\r\nr.image.get_rendition(r.image_spec)\r\n```\r\nThe pickled value was printed correctly. However, when this exact same image/rendition was called from an `{% image ... %}` tag in the admin as described below, the pickling error happens. The `CustomImage(AbstractImage)/CustomRendition(AbstractRendition)` definitions are the same in each of these projects and are set up [as described in the docs](https://docs.wagtail.org/en/latest/advanced_topics/images/custom_image_model.html#custom-image-models).\r\n\r\nThe easiest way to test is from the admin as described above and in **Steps to Reproduce** below. I also tried:\r\n- deleting all existing renditions\r\n- uploading a new image to the `logo_admin` field shown below. Again, when I comment out the `{% image settings.main.SiteSettings.logo_admin width-150 %}` line shown below, the admin comes up fine. When I uncomment it, the error happens again. The error happens with both `png` and `jpg` images.\r\n\r\n### Main Question\r\nHow could the exact same rendition be pickled fine when called from the admin but encounter a pickling error when called from an `{% image ... 
%}` tag??\r\n\r\n### Steps to Reproduce\r\n\r\nUse a `CustomImage` model and set up the following `SiteSettings` model:\r\n\r\n```\r\n@register_setting\r\nclass SiteSettings(BaseSiteSetting):\r\n logo_admin = models.OneToOneField(settings.WAGTAILIMAGES_IMAGE_MODEL, null=True, blank=True, on_delete=models.SET_NULL, related_name='+')\r\n```\r\n\r\nRender the following from the `base.html` template in `wagtailadmin`:\r\n\r\n```\r\n{% extends \"wagtailadmin/base.html\" %}\r\n{% load wagtailimages_tags %}\r\n\r\n{% block branding_logo %}\r\n {% image settings.main.SiteSettings.logo_admin width-150 %}\r\n{% endblock %}\r\n```\r\n\r\n- I have NOT confirmed that this issue can be reproduced as described on a fresh Wagtail project.\r\n\r\n### Technical details\r\n\r\n- Python version: 3.8\r\n- Django version: 4.2.5\r\n- Wagtail version: 5.1\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.coreutils import InvokeViaAttributeShortcut\nfrom wagtail.models import Site\n\nfrom .registry import register_setting\n\n__all__ = [\n \"BaseGenericSetting\",\n \"BaseSiteSetting\",\n \"register_setting\",\n]\n\n\nclass AbstractSetting(models.Model):\n \"\"\"\n The abstract base model for settings. Subclasses must be registered using\n :func:`~wagtail.contrib.settings.registry.register_setting`\n \"\"\"\n\n class Meta:\n abstract = True\n\n # Override to fetch ForeignKey values in the same query when\n # retrieving settings (e.g. via `for_request()`)\n select_related = None\n\n @classmethod\n def base_queryset(cls):\n \"\"\"\n Returns a queryset of objects of this type to use as a base.\n\n You can use the `select_related` attribute on your class to\n specify a list of foreign key field names, which the method\n will attempt to select additional related-object data for\n when the query is executed.\n\n If your needs are more complex than this, you can override\n this method on your custom class.\n \"\"\"\n queryset = cls.objects.all()\n if cls.select_related is not None:\n queryset = queryset.select_related(*cls.select_related)\n return queryset\n\n @classmethod\n def get_cache_attr_name(cls):\n \"\"\"\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n \"\"\"\n return f\"_{cls._meta.app_label}.{cls._meta.model_name}\".lower()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Per-instance page URL cache\n self._page_url_cache = {}\n\n @cached_property\n def page_url(self):\n # Allows get_page_url() to be invoked using\n # `obj.page_url.foreign_key_name` syntax\n return InvokeViaAttributeShortcut(self, \"get_page_url\")\n\n def get_page_url(self, attribute_name, request=None):\n \"\"\"\n Returns the URL of a page referenced by a foreign key\n (or other attribute) matching the name ``attribute_name``.\n If the field value is null, or links to something other\n than a ``Page`` object, an empty string is returned.\n The result is also cached per-object to facilitate\n fast repeat access.\n\n Raises an ``AttributeError`` if the object has no such\n field or attribute.\n \"\"\"\n if attribute_name in self._page_url_cache:\n return self._page_url_cache[attribute_name]\n\n if not hasattr(self, attribute_name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n self.__class__.__name__, attribute_name\n )\n )\n\n page = getattr(self, attribute_name)\n\n if 
hasattr(page, \"specific\"):\n url = page.specific.get_url(getattr(self, \"_request\", None))\n else:\n url = \"\"\n\n self._page_url_cache[attribute_name] = url\n return url\n\n def __getstate__(self):\n # Ignore 'page_url' when pickling\n state = super().__getstate__()\n state.pop(\"page_url\", None)\n return state\n\n\nclass BaseSiteSetting(AbstractSetting):\n site = models.OneToOneField(\n Site,\n unique=True,\n db_index=True,\n editable=False,\n on_delete=models.CASCADE,\n )\n\n class Meta:\n abstract = True\n\n @classmethod\n def for_request(cls, request):\n \"\"\"\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n \"\"\"\n attr_name = cls.get_cache_attr_name()\n if hasattr(request, attr_name):\n return getattr(request, attr_name)\n site = Site.find_for_request(request)\n site_settings = cls.for_site(site)\n # to allow more efficient page url generation\n site_settings._request = request\n setattr(request, attr_name, site_settings)\n return site_settings\n\n @classmethod\n def for_site(cls, site):\n \"\"\"\n Get or create an instance of this setting for the site.\n \"\"\"\n queryset = cls.base_queryset()\n instance, created = queryset.get_or_create(site=site)\n return instance\n\n def __str__(self):\n return _(\"%(site_setting)s for %(site)s\") % {\n \"site_setting\": self._meta.verbose_name,\n \"site\": self.site,\n }\n\n\nclass BaseGenericSetting(AbstractSetting):\n \"\"\"\n Generic settings are singleton models - only one instance of each model\n can be created.\n \"\"\"\n\n class Meta:\n abstract = True\n\n @classmethod\n def _get_or_create(cls):\n \"\"\"\n Internal convenience method to get or create the first instance.\n\n We cannot hardcode `pk=1`, for example, as not all database backends\n use sequential IDs (e.g. Postgres).\n \"\"\"\n\n first_obj = cls.base_queryset().first()\n if first_obj is None:\n return cls.objects.create()\n return first_obj\n\n @classmethod\n def load(cls, request_or_site=None):\n \"\"\"\n Get or create an instance of this model. There is only ever one\n instance of models inheriting from `AbstractSetting` so we can\n use `pk=1`.\n\n If `request_or_site` is present and is a request object, then we cache\n the result on the request for faster repeat access.\n \"\"\"\n\n # We can only cache on the request, so if there is no request then\n # we know there's nothing in the cache.\n if request_or_site is None or isinstance(request_or_site, Site):\n return cls._get_or_create()\n\n # Check if we already have this in the cache and return it if so.\n attr_name = cls.get_cache_attr_name()\n if hasattr(request_or_site, attr_name):\n return getattr(request_or_site, attr_name)\n\n obj = cls._get_or_create()\n\n # Cache for next time.\n setattr(request_or_site, attr_name, obj)\n\n return obj\n\n def __str__(self):\n return str(self._meta.verbose_name)\n", "path": "wagtail/contrib/settings/models.py"}], "after_files": [{"content": "from django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\n\nfrom wagtail.coreutils import InvokeViaAttributeShortcut\nfrom wagtail.models import Site\n\nfrom .registry import register_setting\n\n__all__ = [\n \"BaseGenericSetting\",\n \"BaseSiteSetting\",\n \"register_setting\",\n]\n\n\nclass AbstractSetting(models.Model):\n \"\"\"\n The abstract base model for settings. 
Subclasses must be registered using\n :func:`~wagtail.contrib.settings.registry.register_setting`\n \"\"\"\n\n class Meta:\n abstract = True\n\n # Override to fetch ForeignKey values in the same query when\n # retrieving settings (e.g. via `for_request()`)\n select_related = None\n\n @classmethod\n def base_queryset(cls):\n \"\"\"\n Returns a queryset of objects of this type to use as a base.\n\n You can use the `select_related` attribute on your class to\n specify a list of foreign key field names, which the method\n will attempt to select additional related-object data for\n when the query is executed.\n\n If your needs are more complex than this, you can override\n this method on your custom class.\n \"\"\"\n queryset = cls.objects.all()\n if cls.select_related is not None:\n queryset = queryset.select_related(*cls.select_related)\n return queryset\n\n @classmethod\n def get_cache_attr_name(cls):\n \"\"\"\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n \"\"\"\n return f\"_{cls._meta.app_label}.{cls._meta.model_name}\".lower()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Per-instance page URL cache\n self._page_url_cache = {}\n\n @cached_property\n def page_url(self):\n # Allows get_page_url() to be invoked using\n # `obj.page_url.foreign_key_name` syntax\n return InvokeViaAttributeShortcut(self, \"get_page_url\")\n\n def get_page_url(self, attribute_name, request=None):\n \"\"\"\n Returns the URL of a page referenced by a foreign key\n (or other attribute) matching the name ``attribute_name``.\n If the field value is null, or links to something other\n than a ``Page`` object, an empty string is returned.\n The result is also cached per-object to facilitate\n fast repeat access.\n\n Raises an ``AttributeError`` if the object has no such\n field or attribute.\n \"\"\"\n if attribute_name in self._page_url_cache:\n return self._page_url_cache[attribute_name]\n\n if not hasattr(self, attribute_name):\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n self.__class__.__name__, attribute_name\n )\n )\n\n page = getattr(self, attribute_name)\n\n if hasattr(page, \"specific\"):\n url = page.specific.get_url(getattr(self, \"_request\", None))\n else:\n url = \"\"\n\n self._page_url_cache[attribute_name] = url\n return url\n\n def __getstate__(self):\n # Ignore 'page_url' when pickling\n state = super().__getstate__()\n state.pop(\"page_url\", None)\n return state\n\n\nclass BaseSiteSetting(AbstractSetting):\n site = models.OneToOneField(\n Site,\n unique=True,\n db_index=True,\n editable=False,\n on_delete=models.CASCADE,\n )\n\n class Meta:\n abstract = True\n\n @classmethod\n def for_request(cls, request):\n \"\"\"\n Get or create an instance of this model for the request,\n and cache the result on the request for faster repeat access.\n \"\"\"\n attr_name = cls.get_cache_attr_name()\n if hasattr(request, attr_name):\n return getattr(request, attr_name)\n site = Site.find_for_request(request)\n site_settings = cls.for_site(site)\n # to allow more efficient page url generation\n site_settings._request = request\n setattr(request, attr_name, site_settings)\n return site_settings\n\n def __getstate__(self):\n # Leave out _request from the pickled state\n state = super().__getstate__()\n state.pop(\"_request\", None)\n return state\n\n @classmethod\n def for_site(cls, site):\n \"\"\"\n Get or create an instance of this setting for the site.\n \"\"\"\n queryset = 
cls.base_queryset()\n instance, created = queryset.get_or_create(site=site)\n return instance\n\n def __str__(self):\n return _(\"%(site_setting)s for %(site)s\") % {\n \"site_setting\": self._meta.verbose_name,\n \"site\": self.site,\n }\n\n\nclass BaseGenericSetting(AbstractSetting):\n \"\"\"\n Generic settings are singleton models - only one instance of each model\n can be created.\n \"\"\"\n\n class Meta:\n abstract = True\n\n @classmethod\n def _get_or_create(cls):\n \"\"\"\n Internal convenience method to get or create the first instance.\n\n We cannot hardcode `pk=1`, for example, as not all database backends\n use sequential IDs (e.g. Postgres).\n \"\"\"\n\n first_obj = cls.base_queryset().first()\n if first_obj is None:\n return cls.objects.create()\n return first_obj\n\n @classmethod\n def load(cls, request_or_site=None):\n \"\"\"\n Get or create an instance of this model. There is only ever one\n instance of models inheriting from `AbstractSetting` so we can\n use `pk=1`.\n\n If `request_or_site` is present and is a request object, then we cache\n the result on the request for faster repeat access.\n \"\"\"\n\n # We can only cache on the request, so if there is no request then\n # we know there's nothing in the cache.\n if request_or_site is None or isinstance(request_or_site, Site):\n return cls._get_or_create()\n\n # Check if we already have this in the cache and return it if so.\n attr_name = cls.get_cache_attr_name()\n if hasattr(request_or_site, attr_name):\n return getattr(request_or_site, attr_name)\n\n obj = cls._get_or_create()\n\n # Cache for next time.\n setattr(request_or_site, attr_name, obj)\n\n return obj\n\n def __str__(self):\n return str(self._meta.verbose_name)\n", "path": "wagtail/contrib/settings/models.py"}]} | 2,921 | 134 |
gh_patches_debug_40127 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Barnsley MBC. Fails when running with collection date as today
### I Have A Problem With:
A specific source
### What's Your Problem
Barnsley Council source does not work when the bin collection date is 'today'
This attempted to parse the date and then fails.
It looks like something with a check if the date is 'today' could resolve this.
```
if heading[0].text == "Today":
bin_date = datetime.today().date()
else:
bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
```
### Source (if relevant)
barnsley_gov_uk
### Logs
```Shell
fetch failed for source Barnsley Metropolitan Borough Council: Traceback (most recent call last):
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch
    entries = self._source.fetch()
              ^^^^^^^^^^^^^^^^^^^^
  File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py", line 83, in fetch
    bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/_strptime.py", line 554, in _strptime_datetime
    tt, fraction, gmtoff_fraction = _strptime(data_string, format)
                                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/lib/python3.12/_strptime.py", line 333, in _strptime
    raise ValueError("time data %r does not match format %r" %
ValueError: time data 'Today' does not match format '%A, %B %d, %Y'
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: barnsley_gov_uk
args:
postcode: S70 3QU
uprn: '100050607581'
calendar_title: Waste Collection
day_switch_time: '12:00'
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py`
Content:
```
1 # Credit where it's due:
2 # This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo
3 # https://github.com/robbrad/UKBinCollectionData
4
5
6 from datetime import datetime
7
8 import requests
9 from bs4 import BeautifulSoup
10 from waste_collection_schedule import Collection # type: ignore[attr-defined]
11
12 TITLE = "Barnsley Metropolitan Borough Council"
13 DESCRIPTION = "Source for Barnsley Metropolitan Borough Council."
14 URL = "https://barnsley.gov.uk"
15 TEST_CASES = {
16 "S71 1EE 100050671689": {"postcode": "S71 1EE", "uprn": 100050671689},
17 "S75 1QF 10032783992": {"postcode": "S75 1QF", "uprn": "10032783992"},
18 }
19
20
21 ICON_MAP = {
22 "grey": "mdi:trash-can",
23 "green": "mdi:leaf",
24 "blue": "mdi:package-variant",
25 "brown": "mdi:recycle",
26 }
27
28
29 API_URL = "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress"
30
31
32 class Source:
33 def __init__(self, postcode: str, uprn: str | int):
34 self._postcode: str = postcode
35 self._uprn: str | int = uprn
36
37 def fetch(self):
38 entries = []
39
40 # Pass in form data and make the POST request
41 headers = {
42 "authority": "waste.barnsley.gov.uk",
43 "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
44 "accept-language": "en-GB,en;q=0.9",
45 "cache-control": "no-cache",
46 "content-type": "application/x-www-form-urlencoded",
47 "origin": "https://waste.barnsley.gov.uk",
48 "pragma": "no-cache",
49 "referer": "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress",
50 "sec-ch-ua": '"Chromium";v="118", "Opera GX";v="104", "Not=A?Brand";v="99"',
51 "sec-ch-ua-mobile": "?0",
52 "sec-ch-ua-platform": '"Windows"',
53 "sec-fetch-dest": "document",
54 "sec-fetch-mode": "navigate",
55 "sec-fetch-site": "same-origin",
56 "sec-fetch-user": "?1",
57 "upgrade-insecure-requests": "1",
58 "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.5993.118 Safari/537.36",
59 }
60 form_data = {
61 "personInfo.person1.HouseNumberOrName": "",
62 "personInfo.person1.Postcode": f"{self._postcode}",
63 "personInfo.person1.UPRN": f"{self._uprn}",
64 "person1_SelectAddress": "Select address",
65 }
66 response = requests.post(
67 "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress",
68 headers=headers,
69 data=form_data,
70 )
71 soup = BeautifulSoup(response.text, features="html.parser")
72 soup.prettify()
73
74 if response.status_code != 200:
75 raise ConnectionRefusedError(
76 "Error getting results from website! Please open an issue on GitHub!"
77 )
78
79 # Parse the response, getting the top box first and then tabled collections after
80 results = soup.find("div", {"class": "panel"}).find_all("fieldset")[0:2]
81 heading = results[0].find_all("p")[1:3]
82 bin_text = heading[1].text.strip() + " bin"
83 bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
84 entries.append(
85 Collection(
86 t=bin_text,
87 date=bin_date,
88 icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
89 )
90 )
91
92 results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
93 for row in results_table:
94 text_list = [item.text.strip() for item in row.contents if item != "\n"]
95 bin_text = text_list[1] + " bin"
96 bin_date = datetime.strptime(text_list[0], "%A, %B %d, %Y").date()
97 entries.append(
98 Collection(
99 t=bin_text,
100 date=bin_date,
101 icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
102 )
103 )
104
105 return entries
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py
@@ -3,7 +3,7 @@
# https://github.com/robbrad/UKBinCollectionData
-from datetime import datetime
+from datetime import date, datetime
import requests
from bs4 import BeautifulSoup
@@ -15,6 +15,7 @@
TEST_CASES = {
"S71 1EE 100050671689": {"postcode": "S71 1EE", "uprn": 100050671689},
"S75 1QF 10032783992": {"postcode": "S75 1QF", "uprn": "10032783992"},
+ "test": {"postcode": "S70 3QU", "uprn": 100050607581},
}
@@ -29,6 +30,12 @@
API_URL = "https://waste.barnsley.gov.uk/ViewCollection/SelectAddress"
+def parse_date(d: str) -> date:
+ if d.lower() == "today":
+ return datetime.now().date()
+ return datetime.strptime(d, "%A, %B %d, %Y").date()
+
+
class Source:
def __init__(self, postcode: str, uprn: str | int):
self._postcode: str = postcode
@@ -79,21 +86,10 @@
# Parse the response, getting the top box first and then tabled collections after
results = soup.find("div", {"class": "panel"}).find_all("fieldset")[0:2]
heading = results[0].find_all("p")[1:3]
- bin_text = heading[1].text.strip() + " bin"
- bin_date = datetime.strptime(heading[0].text, "%A, %B %d, %Y").date()
- entries.append(
- Collection(
- t=bin_text,
- date=bin_date,
- icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
- )
- )
- results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
- for row in results_table:
- text_list = [item.text.strip() for item in row.contents if item != "\n"]
- bin_text = text_list[1] + " bin"
- bin_date = datetime.strptime(text_list[0], "%A, %B %d, %Y").date()
+ for bin in heading[1].text.strip().split(", "):
+ bin_text = bin + " bin"
+ bin_date = parse_date(heading[0].text)
entries.append(
Collection(
t=bin_text,
@@ -102,4 +98,18 @@
)
)
+ results_table = [row for row in results[1].find_all("tbody")[0] if row != "\n"]
+ for row in results_table:
+ text_list = [item.text.strip() for item in row.contents if item != "\n"]
+ for bin in text_list[1].split(", "):
+ bin_text = bin + " bin"
+ bin_date = parse_date(text_list[0])
+ entries.append(
+ Collection(
+ t=bin_text,
+ date=bin_date,
+ icon=ICON_MAP.get(bin_text.split(" ")[0].lower()),
+ )
+ )
+
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\n@@ -3,7 +3,7 @@\n # https://github.com/robbrad/UKBinCollectionData\n \n \n-from datetime import datetime\n+from datetime import date, datetime\n \n import requests\n from bs4 import BeautifulSoup\n@@ -15,6 +15,7 @@\n TEST_CASES = {\n \"S71 1EE 100050671689\": {\"postcode\": \"S71 1EE\", \"uprn\": 100050671689},\n \"S75 1QF 10032783992\": {\"postcode\": \"S75 1QF\", \"uprn\": \"10032783992\"},\n+ \"test\": {\"postcode\": \"S70 3QU\", \"uprn\": 100050607581},\n }\n \n \n@@ -29,6 +30,12 @@\n API_URL = \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\"\n \n \n+def parse_date(d: str) -> date:\n+ if d.lower() == \"today\":\n+ return datetime.now().date()\n+ return datetime.strptime(d, \"%A, %B %d, %Y\").date()\n+\n+\n class Source:\n def __init__(self, postcode: str, uprn: str | int):\n self._postcode: str = postcode\n@@ -79,21 +86,10 @@\n # Parse the response, getting the top box first and then tabled collections after\n results = soup.find(\"div\", {\"class\": \"panel\"}).find_all(\"fieldset\")[0:2]\n heading = results[0].find_all(\"p\")[1:3]\n- bin_text = heading[1].text.strip() + \" bin\"\n- bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\n- entries.append(\n- Collection(\n- t=bin_text,\n- date=bin_date,\n- icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n- )\n- )\n \n- results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n- for row in results_table:\n- text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n- bin_text = text_list[1] + \" bin\"\n- bin_date = datetime.strptime(text_list[0], \"%A, %B %d, %Y\").date()\n+ for bin in heading[1].text.strip().split(\", \"):\n+ bin_text = bin + \" bin\"\n+ bin_date = parse_date(heading[0].text)\n entries.append(\n Collection(\n t=bin_text,\n@@ -102,4 +98,18 @@\n )\n )\n \n+ results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n+ for row in results_table:\n+ text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n+ for bin in text_list[1].split(\", \"):\n+ bin_text = bin + \" bin\"\n+ bin_date = parse_date(text_list[0])\n+ entries.append(\n+ Collection(\n+ t=bin_text,\n+ date=bin_date,\n+ icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n+ )\n+ )\n+\n return entries\n", "issue": "[Bug]: Barnsley MBC. 
Fails when running with collection date as today\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nBarnsley Council source does not work when the bin collection date is 'today'\r\nThis attempted to parse the date and then fails.\r\n\r\nIt looks like something with a check if the date is 'today' could resolve this.\r\n```\r\nif heading[0].text == \"Today\": \r\n bin_date = datetime.today().date()\r\nelse:\r\n bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\r\n```\n\n### Source (if relevant)\n\nbarnsley_gov_uk \n\n### Logs\n\n```Shell\nfetch failed for source Barnsley Metropolitan Borough Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py\", line 83, in fetch bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 554, in _strptime_datetime tt, fraction, gmtoff_fraction = _strptime(data_string, format) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File \"/usr/local/lib/python3.12/_strptime.py\", line 333, in _strptime raise ValueError(\"time data %r does not match format %r\" % ValueError: time data 'Today' does not match format '%A, %B %d, %Y'\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: barnsley_gov_uk\r\n args:\r\n postcode: S70 3QU\r\n uprn: '100050607581'\r\n calendar_title: Waste Collection\r\n day_switch_time: '12:00'\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "# Credit where it's due:\n# This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo\n# https://github.com/robbrad/UKBinCollectionData\n\n\nfrom datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Barnsley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Barnsley Metropolitan Borough Council.\"\nURL = \"https://barnsley.gov.uk\"\nTEST_CASES = {\n \"S71 1EE 100050671689\": {\"postcode\": \"S71 1EE\", \"uprn\": 100050671689},\n \"S75 1QF 10032783992\": {\"postcode\": \"S75 1QF\", \"uprn\": \"10032783992\"},\n}\n\n\nICON_MAP = {\n \"grey\": \"mdi:trash-can\",\n \"green\": \"mdi:leaf\",\n \"blue\": \"mdi:package-variant\",\n \"brown\": 
\"mdi:recycle\",\n}\n\n\nAPI_URL = \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\"\n\n\nclass Source:\n def __init__(self, postcode: str, uprn: str | int):\n self._postcode: str = postcode\n self._uprn: str | int = uprn\n\n def fetch(self):\n entries = []\n\n # Pass in form data and make the POST request\n headers = {\n \"authority\": \"waste.barnsley.gov.uk\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\",\n \"accept-language\": \"en-GB,en;q=0.9\",\n \"cache-control\": \"no-cache\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n \"origin\": \"https://waste.barnsley.gov.uk\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n \"sec-ch-ua\": '\"Chromium\";v=\"118\", \"Opera GX\";v=\"104\", \"Not=A?Brand\";v=\"99\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.5993.118 Safari/537.36\",\n }\n form_data = {\n \"personInfo.person1.HouseNumberOrName\": \"\",\n \"personInfo.person1.Postcode\": f\"{self._postcode}\",\n \"personInfo.person1.UPRN\": f\"{self._uprn}\",\n \"person1_SelectAddress\": \"Select address\",\n }\n response = requests.post(\n \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n headers=headers,\n data=form_data,\n )\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n soup.prettify()\n\n if response.status_code != 200:\n raise ConnectionRefusedError(\n \"Error getting results from website! 
Please open an issue on GitHub!\"\n )\n\n # Parse the response, getting the top box first and then tabled collections after\n results = soup.find(\"div\", {\"class\": \"panel\"}).find_all(\"fieldset\")[0:2]\n heading = results[0].find_all(\"p\")[1:3]\n bin_text = heading[1].text.strip() + \" bin\"\n bin_date = datetime.strptime(heading[0].text, \"%A, %B %d, %Y\").date()\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n for row in results_table:\n text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n bin_text = text_list[1] + \" bin\"\n bin_date = datetime.strptime(text_list[0], \"%A, %B %d, %Y\").date()\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py"}], "after_files": [{"content": "# Credit where it's due:\n# This is predominantly a refactoring of the Bristol City Council script from the UKBinCollectionData repo\n# https://github.com/robbrad/UKBinCollectionData\n\n\nfrom datetime import date, datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Barnsley Metropolitan Borough Council\"\nDESCRIPTION = \"Source for Barnsley Metropolitan Borough Council.\"\nURL = \"https://barnsley.gov.uk\"\nTEST_CASES = {\n \"S71 1EE 100050671689\": {\"postcode\": \"S71 1EE\", \"uprn\": 100050671689},\n \"S75 1QF 10032783992\": {\"postcode\": \"S75 1QF\", \"uprn\": \"10032783992\"},\n \"test\": {\"postcode\": \"S70 3QU\", \"uprn\": 100050607581},\n}\n\n\nICON_MAP = {\n \"grey\": \"mdi:trash-can\",\n \"green\": \"mdi:leaf\",\n \"blue\": \"mdi:package-variant\",\n \"brown\": \"mdi:recycle\",\n}\n\n\nAPI_URL = \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\"\n\n\ndef parse_date(d: str) -> date:\n if d.lower() == \"today\":\n return datetime.now().date()\n return datetime.strptime(d, \"%A, %B %d, %Y\").date()\n\n\nclass Source:\n def __init__(self, postcode: str, uprn: str | int):\n self._postcode: str = postcode\n self._uprn: str | int = uprn\n\n def fetch(self):\n entries = []\n\n # Pass in form data and make the POST request\n headers = {\n \"authority\": \"waste.barnsley.gov.uk\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\",\n \"accept-language\": \"en-GB,en;q=0.9\",\n \"cache-control\": \"no-cache\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n \"origin\": \"https://waste.barnsley.gov.uk\",\n \"pragma\": \"no-cache\",\n \"referer\": \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n \"sec-ch-ua\": '\"Chromium\";v=\"118\", \"Opera GX\";v=\"104\", \"Not=A?Brand\";v=\"99\"',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": '\"Windows\"',\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"same-origin\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.5993.118 Safari/537.36\",\n }\n form_data = {\n \"personInfo.person1.HouseNumberOrName\": \"\",\n \"personInfo.person1.Postcode\": 
f\"{self._postcode}\",\n \"personInfo.person1.UPRN\": f\"{self._uprn}\",\n \"person1_SelectAddress\": \"Select address\",\n }\n response = requests.post(\n \"https://waste.barnsley.gov.uk/ViewCollection/SelectAddress\",\n headers=headers,\n data=form_data,\n )\n soup = BeautifulSoup(response.text, features=\"html.parser\")\n soup.prettify()\n\n if response.status_code != 200:\n raise ConnectionRefusedError(\n \"Error getting results from website! Please open an issue on GitHub!\"\n )\n\n # Parse the response, getting the top box first and then tabled collections after\n results = soup.find(\"div\", {\"class\": \"panel\"}).find_all(\"fieldset\")[0:2]\n heading = results[0].find_all(\"p\")[1:3]\n\n for bin in heading[1].text.strip().split(\", \"):\n bin_text = bin + \" bin\"\n bin_date = parse_date(heading[0].text)\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n results_table = [row for row in results[1].find_all(\"tbody\")[0] if row != \"\\n\"]\n for row in results_table:\n text_list = [item.text.strip() for item in row.contents if item != \"\\n\"]\n for bin in text_list[1].split(\", \"):\n bin_text = bin + \" bin\"\n bin_date = parse_date(text_list[0])\n entries.append(\n Collection(\n t=bin_text,\n date=bin_date,\n icon=ICON_MAP.get(bin_text.split(\" \")[0].lower()),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/barnsley_gov_uk.py"}]} | 2,280 | 881 |
gh_patches_debug_12776 | rasdani/github-patches | git_diff | platformsh__platformsh-docs-2105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
:arrow_up: Update MeiliSearch
### What needs to be documented?
We should update MeiliSearch to version 0.25. This means we have to change how we manage API keys. It also means we can create more keys for monitoring and the like.
### Additional context
[Blog post on version](https://blog.meilisearch.com/whats-new-in-v0-25/)
[Additional context](https://github.com/orgs/platformsh/projects/3)
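As a rough illustration of the 0.25-era flow (drawn from the change further down, not an authoritative client reference), index creation becomes an asynchronous task that has to be waited on before the index can be used:

```python
import meilisearch

client = meilisearch.Client("http://127.0.0.1:7700", "MASTER_KEY")  # placeholder host and key

# create_index now returns a task descriptor rather than an Index object
task = client.create_index(uid="docs", options={"primaryKey": "documentId"})
client.wait_for_task(task["uid"])   # block until the server has processed the task
index = client.get_index("docs")    # the index exists now and can be configured
```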
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `search/main.py`
Content:
```
1 import os
2 import glob
3 import json
4 import meilisearch
5 from platformshconfig import Config
6
7 class Search:
8 def __init__(self):
9 self.default = {
10 "host": "http://127.0.0.1",
11 "key": None,
12 "port": 7700
13 }
14
15 self.scrape_dir = "output"
16 self.scrape_config = "config/scrape.json"
17 self.docs_index = "docs"
18 self.primaryKey = "documentId"
19 self.index_name = "Docs"
20
21 # Below are Platform.sh custom settings for how the search engine functions.
22
23 # Data available to the dropdown React app in docs, used to fill out autocomplete results.
24 self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']
25 # Data actually searchable by our queries.
26 self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']
27
28 # Show results for one query with the listed pages, when they by default would not show up as best results.
29 # Note: these aren't automatically two-way, which is why they're all defined twice.
30 self.synonyms = {
31 "cron": ["crons"],
32 "crons": ["cron tasks", "cron jobs"],
33 "e-mail": ["email"],
34 "routes.yaml": ["routes"],
35 "routes": ["routes.yaml"],
36 "services": ["services.yaml"],
37 "services.yaml": ["services"],
38 "application": [".platform.app.yaml", "app.yaml", "applications.yaml"],
39 ".platform.app.yaml": ["application"],
40 "app.yaml": ["application"],
41 "applications.yaml": ["application", "multi-app"],
42 "multi-app": ["applications.yaml"],
43 "regions": ["public ip addresses"],
44 "public ip addresses": ["regions"],
45 "ssl": ["https", "tls"],
46 "https": ["ssl"],
47 "auth": ["authentication", "access control"], # Only needs to be one way since we don't use "auth" in the docs
48 }
49
50 # Ranking rules:
51 #
52 # - Default order: ["words", "typo", "proximity", "attribute", "sort", "exactness"]
53 #
54 # - words: number of times query is in document (greater number gets priority)
55 # - typo: fewer typos > more typos
56 # - proximity: smaller distance between multiple occurences of query in same document > larger distances
57 # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in
58 # more important attributes first.
59 # - sort: queries are sorted at query time
60 # - exactness: similarity of matched words in document with query
61
62 self.ranking_rules = ["rank:asc", "attribute", "typo", "words", "proximity", "exactness"]
63
64 self.updated_settings = {
65 "rankingRules": self.ranking_rules,
66 "searchableAttributes": self.searchable_attributes,
67 "displayedAttributes": self.displayed_attributes
68 }
69
70 # Group results by page
71 self.distinct_attribute = "pageUrl"
72
73 def getConnectionString(self):
74 """
75 Sets the Meilisearch host string, depending on the environment.
76
77 Returns:
78 string: Meilisearch host string.
79 """
80 if os.environ.get('PORT'):
81 return "{}:{}".format(self.default["host"], os.environ['PORT'])
82 else:
83 return "{}:{}".format(self.default["host"], self.default["port"])
84
85 def getMasterKey(self):
86 """
87 Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.
88 """
89 config = Config()
90 if config.is_valid_platform():
91 return config.projectEntropy
92 elif os.environ.get("MEILI_MASTER_KEY"):
93 return os.environ["MEILI_MASTER_KEY"]
94 else:
95 return self.default["key"]
96
97 def add_documents(self, index):
98 """
99 Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.
100 """
101 documents = [f for f in glob.glob("{}/*.json".format(self.scrape_dir))]
102 for doc in documents:
103 self.add(doc, index)
104
105 def add(self, doc, index):
106 """
107 Add an individual site's index to the Meilisearch service.
108 """
109 with open(doc) as scraped_index:
110 data = json.load(scraped_index)
111 index.add_documents(data)
112
113 def update(self):
114 """
115 Updates the Meilisearch index.
116 """
117 # Create a Meilisearch client.
118 client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())
119
120 # Delete previous index
121 if len(client.get_indexes()):
122 client.get_index(self.docs_index).delete()
123
124 # Create a new index
125 index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
126
127 # Add synonyms for the index
128 index.update_synonyms(self.synonyms)
129
130 # Update its settings: what can be searched, what's displayable, and how results should be ranked.
131 index.update_settings(self.updated_settings)
132
133 # Update distinct attribute.
134 index.update_distinct_attribute(self.distinct_attribute)
135
136 # Add documents to the index
137 self.add_documents(index)
138
139 if __name__ == "__main__":
140 meili = Search()
141 meili.update()
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/search/main.py b/search/main.py
--- a/search/main.py
+++ b/search/main.py
@@ -119,10 +119,14 @@
# Delete previous index
if len(client.get_indexes()):
- client.get_index(self.docs_index).delete()
+ client.index(self.docs_index).delete()
# Create a new index
- index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
+ create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})
+
+ client.wait_for_task(create_index_task['uid'])
+
+ index = client.get_index(create_index_task['indexUid'])
# Add synonyms for the index
index.update_synonyms(self.synonyms)
| {"golden_diff": "diff --git a/search/main.py b/search/main.py\n--- a/search/main.py\n+++ b/search/main.py\n@@ -119,10 +119,14 @@\n \n # Delete previous index\n if len(client.get_indexes()):\n- client.get_index(self.docs_index).delete()\n+ client.index(self.docs_index).delete()\n \n # Create a new index\n- index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n+ create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n+\n+ client.wait_for_task(create_index_task['uid'])\n+\n+ index = client.get_index(create_index_task['indexUid'])\n \n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n", "issue": ":arrow_up: Update MeiliSearch\n### What needs to be documented?\r\n\r\nWe should update MeiliSearch to version 0.25. This means we have to change how we manage API keys. It does mean we can create more keys for monitoring and the like.\r\n\r\n### Additional context\r\n\r\n[Blog post on version](https://blog.meilisearch.com/whats-new-in-v0-25/)\r\n[Additional context](https://github.com/orgs/platformsh/projects/3)\n", "before_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n \"auth\": [\"authentication\", \"access control\"], # Only needs to be one way since we don't use \"auth\" in the docs\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). 
terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.get_index(self.docs_index).delete()\n\n # Create a new index\n index = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}], "after_files": [{"content": "import os\nimport glob\nimport json\nimport meilisearch\nfrom platformshconfig import Config\n\nclass Search:\n def __init__(self):\n self.default = {\n \"host\": \"http://127.0.0.1\",\n \"key\": None,\n \"port\": 7700\n }\n\n self.scrape_dir = \"output\"\n self.scrape_config = \"config/scrape.json\"\n self.docs_index = \"docs\"\n self.primaryKey = \"documentId\"\n self.index_name = \"Docs\"\n\n # Below are Platform.sh custom settings for how the search engine functions.\n\n # Data available to the dropdown React app in docs, used to fill out autocomplete results.\n self.displayed_attributes = ['keywords', 'title', 'text', 'url', 'site', 'section']\n # Data actually searchable by our queries.\n self.searchable_attributes = ['keywords', 'title', 'pageUrl', 'section', 'text', 'url']\n\n # Show results for one query with the listed pages, when they by default would not show up as best results.\n # 
Note: these aren't automatically two-way, which is why they're all defined twice.\n self.synonyms = {\n \"cron\": [\"crons\"],\n \"crons\": [\"cron tasks\", \"cron jobs\"],\n \"e-mail\": [\"email\"],\n \"routes.yaml\": [\"routes\"],\n \"routes\": [\"routes.yaml\"],\n \"services\": [\"services.yaml\"],\n \"services.yaml\": [\"services\"],\n \"application\": [\".platform.app.yaml\", \"app.yaml\", \"applications.yaml\"],\n \".platform.app.yaml\": [\"application\"],\n \"app.yaml\": [\"application\"],\n \"applications.yaml\": [\"application\", \"multi-app\"],\n \"multi-app\": [\"applications.yaml\"],\n \"regions\": [\"public ip addresses\"],\n \"public ip addresses\": [\"regions\"],\n \"ssl\": [\"https\", \"tls\"],\n \"https\": [\"ssl\"],\n \"auth\": [\"authentication\", \"access control\"], # Only needs to be one way since we don't use \"auth\" in the docs\n }\n\n # Ranking rules:\n #\n # - Default order: [\"words\", \"typo\", \"proximity\", \"attribute\", \"sort\", \"exactness\"]\n #\n # - words: number of times query is in document (greater number gets priority)\n # - typo: fewer typos > more typos\n # - proximity: smaller distance between multiple occurences of query in same document > larger distances\n # - attribute: sorted according to order of importance of attributes (searchable_attributes). terms in\n # more important attributes first.\n # - sort: queries are sorted at query time\n # - exactness: similarity of matched words in document with query\n\n self.ranking_rules = [\"rank:asc\", \"attribute\", \"typo\", \"words\", \"proximity\", \"exactness\"]\n\n self.updated_settings = {\n \"rankingRules\": self.ranking_rules,\n \"searchableAttributes\": self.searchable_attributes,\n \"displayedAttributes\": self.displayed_attributes\n }\n\n # Group results by page\n self.distinct_attribute = \"pageUrl\"\n\n def getConnectionString(self):\n \"\"\"\n Sets the Meilisearch host string, depending on the environment.\n\n Returns:\n string: Meilisearch host string.\n \"\"\"\n if os.environ.get('PORT'):\n return \"{}:{}\".format(self.default[\"host\"], os.environ['PORT'])\n else:\n return \"{}:{}\".format(self.default[\"host\"], self.default[\"port\"])\n\n def getMasterKey(self):\n \"\"\"\n Retrieves the Meilisearch master key, either from the Platform.sh environment or locally.\n \"\"\"\n config = Config()\n if config.is_valid_platform():\n return config.projectEntropy\n elif os.environ.get(\"MEILI_MASTER_KEY\"):\n return os.environ[\"MEILI_MASTER_KEY\"]\n else:\n return self.default[\"key\"]\n\n def add_documents(self, index):\n \"\"\"\n Cycle through the individual site indexes in /outputs so their individual documents can be added to Meilisearch.\n \"\"\"\n documents = [f for f in glob.glob(\"{}/*.json\".format(self.scrape_dir))]\n for doc in documents:\n self.add(doc, index)\n\n def add(self, doc, index):\n \"\"\"\n Add an individual site's index to the Meilisearch service.\n \"\"\"\n with open(doc) as scraped_index:\n data = json.load(scraped_index)\n index.add_documents(data)\n\n def update(self):\n \"\"\"\n Updates the Meilisearch index.\n \"\"\"\n # Create a Meilisearch client.\n client = meilisearch.Client(self.getConnectionString(), self.getMasterKey())\n\n # Delete previous index\n if len(client.get_indexes()):\n client.index(self.docs_index).delete()\n\n # Create a new index\n create_index_task = client.create_index(uid=self.docs_index, options={'primaryKey': self.primaryKey, 'uid': self.index_name})\n\n client.wait_for_task(create_index_task['uid'])\n\n index = 
client.get_index(create_index_task['indexUid'])\n\n # Add synonyms for the index\n index.update_synonyms(self.synonyms)\n\n # Update its settings: what can be searched, what's displayable, and how results should be ranked.\n index.update_settings(self.updated_settings)\n\n # Update distinct attribute.\n index.update_distinct_attribute(self.distinct_attribute)\n\n # Add documents to the index\n self.add_documents(index)\n\nif __name__ == \"__main__\":\n meili = Search()\n meili.update()\n", "path": "search/main.py"}]} | 1,861 | 185 |
gh_patches_debug_5561 | rasdani/github-patches | git_diff | Showndarya__Hacktoberfest-545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make JSON linting more verbose ?
Currently we simply check:
1. json is valid
2. json contains the keys we want
3. those keys are not empty
The problem is with step 1: a bare parse failure is not very helpful if people have no idea what the JSON spec is.
Possible fixes:
1. add a link to jsonlint.com or a similar service in the printed error so that people can check the JSON themselves.
2. add something like http://deron.meranda.us/python/demjson/ with proper package caching on the Travis side, but it will probably still make for slower builds.
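A minimal sketch of option 1 (`changed_file` and `there_was_an_error` stand in for the variables the script already uses, and the URL-encoding step is an assumption about how jsonlint accepts input):

```python
import json
import urllib.parse

try:
    with open(changed_file) as data_file:
        raw = data_file.read()
        file_content = json.loads(raw)
except json.JSONDecodeError:
    there_was_an_error = True
    link = "https://jsonlint.com/?json=" + urllib.parse.quote(raw)
    print(f"🔥 JSON could not be parsed. Paste it into {link} to see what is wrong.")
```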
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.travis.py`
Content:
```
1 import json
2 import os
3 import re
4 import subprocess
5
6 # Get a diff between master and current.
7 try:
8 commit_range = os.environ["TRAVIS_COMMIT_RANGE"]
9 changed_files = subprocess.check_output(["git", "diff", "--name-only", commit_range])
10 except KeyError:
11 print("🔥 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.")
12 exit(1)
13
14 # Filter JSON files only.
15 changed_files_json = []
16 if changed_files:
17 changed_files = changed_files.decode()
18 for changed_file in changed_files.split('\n'):
19 if re.search(r"\.json$", changed_file):
20 changed_files_json.append(changed_file)
21
22
23 # Iterate over list of changed JSON files.
24 for changed_file_json in changed_files_json:
25 print(f"Checking file {changed_file_json}...")
26 there_was_an_error = False
27
28 if not os.path.basename(changed_file_json)[0].isupper():
29 there_was_an_error = True
30 print("🔥 File name not capitalized.")
31
32 try:
33 with open(changed_file_json) as data_file:
34 file_content = json.loads(data_file.read())
35 except json.decoder.JSONDecodeError:
36 there_was_an_error = True
37 print("🔥 JSON could not be parsed.")
38
39 if 'word' not in file_content:
40 there_was_an_error = True
41 print("🔥 Key 'word' not found.")
42
43 if not file_content["word"]:
44 there_was_an_error = True
45 print("🔥 Value for 'word' appears to be empty.")
46
47 if 'definitions' not in file_content:
48 there_was_an_error = True
49 print("🔥 Key 'definitions' not found.")
50
51 if not file_content["definitions"]:
52 there_was_an_error = True
53 print("🔥 Value for 'definitions' appears to be empty.")
54
55 if 'parts-of-speech' not in file_content:
56 there_was_an_error = True
57 print("🔥 Key 'parts-of-speech' not found.")
58
59 if not file_content["parts-of-speech"]:
60 there_was_an_error = True
61 print("🔥 Value for 'parts-of-speech' appears to be empty.")
62
63 if there_was_an_error:
64 exit(1)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/.travis.py b/.travis.py
--- a/.travis.py
+++ b/.travis.py
@@ -34,7 +34,7 @@
file_content = json.loads(data_file.read())
except json.decoder.JSONDecodeError:
there_was_an_error = True
- print("🔥 JSON could not be parsed.")
+ print(f"🔥 JSON could not be parsed. Follow this link to know more : https://jsonlint.com/?json={data_file.read()}")
if 'word' not in file_content:
there_was_an_error = True
| {"golden_diff": "diff --git a/.travis.py b/.travis.py\n--- a/.travis.py\n+++ b/.travis.py\n@@ -34,7 +34,7 @@\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n- print(\"\ud83d\udd25 JSON could not be parsed.\")\n+ print(f\"\ud83d\udd25 JSON could not be parsed. Follow this link to know more : https://jsonlint.com/?json={data_file.read()}\")\n \n if 'word' not in file_content:\n there_was_an_error = True\n", "issue": "Make JSON linting more verbose ?\nCurrently we simply check :\r\n1. json is valid\r\n2. json contains the keys we want\r\n3. those keys are not empty\r\n\r\nthe problem is with step 1, it's not very helpful if people have no idea what the JSON spec is.\r\n\r\nOne fix is :\r\n1. add a link to jsonlint.com or such a service in the print of the error so that people can check themselves.\r\n2. add something like http://deron.meranda.us/python/demjson/ with proper package caching on travis side, but it will make for slower builds still probably\n", "before_files": [{"content": "import json\nimport os\nimport re\nimport subprocess\n\n# Get a diff between master and current.\ntry:\n commit_range = os.environ[\"TRAVIS_COMMIT_RANGE\"]\n changed_files = subprocess.check_output([\"git\", \"diff\", \"--name-only\", commit_range])\nexcept KeyError:\n print(\"\ud83d\udd25 This should be run on Travis. Otherwise make sure TRAVIS_BRANCH is set.\")\n exit(1)\n\n# Filter JSON files only.\nchanged_files_json = []\nif changed_files:\n changed_files = changed_files.decode()\n for changed_file in changed_files.split('\\n'):\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n\n \n# Iterate over list of changed JSON files.\nfor changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n\n if not os.path.basename(changed_file_json)[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n\n try:\n with open(changed_file_json) as data_file:\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n print(\"\ud83d\udd25 JSON could not be parsed.\")\n\n if 'word' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'word' not found.\")\n\n if not file_content[\"word\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'word' appears to be empty.\")\n\n if 'definitions' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'definitions' not found.\")\n\n if not file_content[\"definitions\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'definitions' appears to be empty.\")\n\n if 'parts-of-speech' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'parts-of-speech' not found.\")\n\n if not file_content[\"parts-of-speech\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'parts-of-speech' appears to be empty.\")\n\n if there_was_an_error:\n exit(1)\n", "path": ".travis.py"}], "after_files": [{"content": "import json\nimport os\nimport re\nimport subprocess\n\n# Get a diff between master and current.\ntry:\n commit_range = os.environ[\"TRAVIS_COMMIT_RANGE\"]\n changed_files = subprocess.check_output([\"git\", \"diff\", \"--name-only\", commit_range])\nexcept KeyError:\n print(\"\ud83d\udd25 This should be run on Travis. 
Otherwise make sure TRAVIS_BRANCH is set.\")\n exit(1)\n\n# Filter JSON files only.\nchanged_files_json = []\nif changed_files:\n changed_files = changed_files.decode()\n for changed_file in changed_files.split('\\n'):\n if re.search(r\"\\.json$\", changed_file):\n changed_files_json.append(changed_file)\n\n \n# Iterate over list of changed JSON files.\nfor changed_file_json in changed_files_json:\n print(f\"Checking file {changed_file_json}...\")\n there_was_an_error = False\n\n if not os.path.basename(changed_file_json)[0].isupper():\n there_was_an_error = True\n print(\"\ud83d\udd25 File name not capitalized.\")\n\n try:\n with open(changed_file_json) as data_file:\n file_content = json.loads(data_file.read())\n except json.decoder.JSONDecodeError:\n there_was_an_error = True\n print(f\"\ud83d\udd25 JSON could not be parsed. Follow this link to know more : https://jsonlint.com/?json={data_file.read()}\")\n\n if 'word' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'word' not found.\")\n\n if not file_content[\"word\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'word' appears to be empty.\")\n\n if 'definitions' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'definitions' not found.\")\n\n if not file_content[\"definitions\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'definitions' appears to be empty.\")\n\n if 'parts-of-speech' not in file_content:\n there_was_an_error = True\n print(\"\ud83d\udd25 Key 'parts-of-speech' not found.\")\n\n if not file_content[\"parts-of-speech\"]:\n there_was_an_error = True\n print(\"\ud83d\udd25 Value for 'parts-of-speech' appears to be empty.\")\n\n if there_was_an_error:\n exit(1)\n", "path": ".travis.py"}]} | 978 | 128 |
gh_patches_debug_38657 | rasdani/github-patches | git_diff | litestar-org__litestar-1780 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
That assumption does not hold in general, especially with any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
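For context, a minimal illustration of why an on-disk existence check cannot work here (package and resource names are hypothetical): with importlib_resources, package data may be read straight out of a zip or wheel, so the "directory" never exists as a local path.

```python
from importlib_resources import files

# "my_package/static" may live inside a zipped wheel; there is no real directory on disk,
# so validating it as a pydantic DirectoryPath fails even though the files are readable.
static_root = files("my_package") / "static"
print((static_root / "styles.css").read_text())
```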
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/contrib/sqlalchemy/types.py`
Content:
```
1 from __future__ import annotations
2
3 import uuid
4 from base64 import b64decode
5 from typing import TYPE_CHECKING, Any, cast
6
7 from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB
8 from sqlalchemy.dialects.oracle import RAW as ORA_RAW
9 from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB
10 from sqlalchemy.dialects.postgresql import UUID as PG_UUID
11 from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator
12 from sqlalchemy.types import JSON as _JSON
13
14 if TYPE_CHECKING:
15 from sqlalchemy.engine import Dialect
16
17 BigIntIdentity = BigInteger().with_variant(Integer, "sqlite")
18
19
20 class GUID(TypeDecorator):
21 """Platform-independent GUID type.
22
23 Uses PostgreSQL's UUID type, Oracle's RAW(16) type, otherwise uses
24 BINARY(16) or CHAR(32), storing as stringified hex values.
25
26 Will accept stringified UUIDs as a hexstring or an actual UUID
27
28 """
29
30 impl = BINARY(16)
31 cache_ok = True
32
33 @property
34 def python_type(self) -> type[uuid.UUID]:
35 return uuid.UUID
36
37 def __init__(self, *args: Any, binary: bool = True, **kwargs: Any) -> None:
38 self.binary = binary
39
40 def load_dialect_impl(self, dialect: Dialect) -> Any:
41 if dialect.name in {"postgresql", "duckdb"}:
42 return dialect.type_descriptor(PG_UUID())
43 if dialect.name == "oracle":
44 return dialect.type_descriptor(ORA_RAW(16))
45 if self.binary:
46 return dialect.type_descriptor(BINARY(16))
47 return dialect.type_descriptor(CHAR(32))
48
49 def process_bind_param(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> bytes | str | None:
50 if value is None:
51 return value
52 if dialect.name in {"postgresql", "duckdb"}:
53 return str(value)
54 value = self.to_uuid(value)
55 if value is None:
56 return value
57 if dialect.name in {"oracle", "spanner+spanner"}:
58 return value.bytes
59 return value.bytes if self.binary else value.hex
60
61 def process_result_value(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> uuid.UUID | None:
62 if value is None:
63 return value
64 if isinstance(value, uuid.UUID):
65 return value
66 if dialect.name == "spanner+spanner":
67 return uuid.UUID(bytes=b64decode(value))
68 if self.binary:
69 return uuid.UUID(bytes=cast("bytes", value))
70 return uuid.UUID(hex=cast("str", value))
71
72 @staticmethod
73 def to_uuid(value: Any) -> uuid.UUID | None:
74 if isinstance(value, uuid.UUID) or value is None:
75 return value
76 try:
77 value = uuid.UUID(hex=value)
78 except (TypeError, ValueError):
79 value = uuid.UUID(bytes=value)
80 return cast("uuid.UUID | None", value)
81
82
83 class JSON(TypeDecorator):
84 """Platform-independent JSON type.
85
86 Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.
87
88 JSON = _JSON().with_variant(PG_JSONB, "postgresql").with_variant(ORA_BLOB, "oracle")
89
90 """
91
92 impl = _JSON
93 cache_ok = True
94
95 @property
96 def python_type(self) -> type[dict]:
97 return dict
98
99 def __init__(self, *args: Any, **kwargs: Any) -> None:
100 """Initialize JSON type"""
101
102 def load_dialect_impl(self, dialect: Dialect) -> Any:
103 if dialect.name == "postgresql":
104 return dialect.type_descriptor(PG_JSONB()) # type: ignore
105 if dialect.name == "oracle":
106 return dialect.type_descriptor(ORA_BLOB())
107 return dialect.type_descriptor(_JSON())
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/contrib/sqlalchemy/types.py b/litestar/contrib/sqlalchemy/types.py
--- a/litestar/contrib/sqlalchemy/types.py
+++ b/litestar/contrib/sqlalchemy/types.py
@@ -4,11 +4,12 @@
from base64 import b64decode
from typing import TYPE_CHECKING, Any, cast
+from sqlalchemy import text, util
from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB
from sqlalchemy.dialects.oracle import RAW as ORA_RAW
from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB
from sqlalchemy.dialects.postgresql import UUID as PG_UUID
-from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator
+from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, SchemaType, TypeDecorator
from sqlalchemy.types import JSON as _JSON
if TYPE_CHECKING:
@@ -80,7 +81,7 @@
return cast("uuid.UUID | None", value)
-class JSON(TypeDecorator):
+class JSON(TypeDecorator, SchemaType): # type: ignore
"""Platform-independent JSON type.
Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.
@@ -98,6 +99,8 @@
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize JSON type"""
+ self.name = kwargs.pop("name", None)
+ self.oracle_strict = kwargs.pop("oracle_strict", True)
def load_dialect_impl(self, dialect: Dialect) -> Any:
if dialect.name == "postgresql":
@@ -105,3 +108,31 @@
if dialect.name == "oracle":
return dialect.type_descriptor(ORA_BLOB())
return dialect.type_descriptor(_JSON())
+
+ def _should_create_constraint(self, compiler: Any, **kw: Any) -> bool:
+ return bool(compiler.dialect.name == "oracle")
+
+ def _variant_mapping_for_set_table(self, column: Any) -> dict | None:
+ if column.type._variant_mapping:
+ variant_mapping = dict(column.type._variant_mapping)
+ variant_mapping["_default"] = column.type
+ else:
+ variant_mapping = None
+ return variant_mapping
+
+ @util.preload_module("sqlalchemy.sql.schema")
+ def _set_table(self, column: Any, table: Any) -> None:
+ schema = util.preloaded.sql_schema
+ variant_mapping = self._variant_mapping_for_set_table(column)
+ constraint_options = "(strict)" if self.oracle_strict else ""
+ sqltext = text(f"{column.name} is json {constraint_options}")
+ e = schema.CheckConstraint(
+ sqltext,
+ name=f"{column.name}_is_json",
+ _create_rule=util.portable_instancemethod( # type: ignore[no-untyped-call]
+ self._should_create_constraint,
+ {"variant_mapping": variant_mapping},
+ ),
+ _type_bound=True,
+ )
+ table.append_constraint(e)
| {"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/types.py b/litestar/contrib/sqlalchemy/types.py\n--- a/litestar/contrib/sqlalchemy/types.py\n+++ b/litestar/contrib/sqlalchemy/types.py\n@@ -4,11 +4,12 @@\n from base64 import b64decode\n from typing import TYPE_CHECKING, Any, cast\n \n+from sqlalchemy import text, util\n from sqlalchemy.dialects.oracle import BLOB as ORA_BLOB\n from sqlalchemy.dialects.oracle import RAW as ORA_RAW\n from sqlalchemy.dialects.postgresql import JSONB as PG_JSONB\n from sqlalchemy.dialects.postgresql import UUID as PG_UUID\n-from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator\n+from sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, SchemaType, TypeDecorator\n from sqlalchemy.types import JSON as _JSON\n \n if TYPE_CHECKING:\n@@ -80,7 +81,7 @@\n return cast(\"uuid.UUID | None\", value)\n \n \n-class JSON(TypeDecorator):\n+class JSON(TypeDecorator, SchemaType): # type: ignore\n \"\"\"Platform-independent JSON type.\n \n Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.\n@@ -98,6 +99,8 @@\n \n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize JSON type\"\"\"\n+ self.name = kwargs.pop(\"name\", None)\n+ self.oracle_strict = kwargs.pop(\"oracle_strict\", True)\n \n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name == \"postgresql\":\n@@ -105,3 +108,31 @@\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_BLOB())\n return dialect.type_descriptor(_JSON())\n+\n+ def _should_create_constraint(self, compiler: Any, **kw: Any) -> bool:\n+ return bool(compiler.dialect.name == \"oracle\")\n+\n+ def _variant_mapping_for_set_table(self, column: Any) -> dict | None:\n+ if column.type._variant_mapping:\n+ variant_mapping = dict(column.type._variant_mapping)\n+ variant_mapping[\"_default\"] = column.type\n+ else:\n+ variant_mapping = None\n+ return variant_mapping\n+\n+ @util.preload_module(\"sqlalchemy.sql.schema\")\n+ def _set_table(self, column: Any, table: Any) -> None:\n+ schema = util.preloaded.sql_schema\n+ variant_mapping = self._variant_mapping_for_set_table(column)\n+ constraint_options = \"(strict)\" if self.oracle_strict else \"\"\n+ sqltext = text(f\"{column.name} is json {constraint_options}\")\n+ e = schema.CheckConstraint(\n+ sqltext,\n+ name=f\"{column.name}_is_json\",\n+ _create_rule=util.portable_instancemethod( # type: ignore[no-untyped-call]\n+ self._should_create_constraint,\n+ {\"variant_mapping\": variant_mapping},\n+ ),\n+ _type_bound=True,\n+ )\n+ table.append_constraint(e)\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport uuid\nfrom base64 import b64decode\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom sqlalchemy.dialects.oracle import BLOB as ORA_BLOB\nfrom sqlalchemy.dialects.oracle import RAW as ORA_RAW\nfrom sqlalchemy.dialects.postgresql import JSONB as PG_JSONB\nfrom sqlalchemy.dialects.postgresql import UUID as PG_UUID\nfrom sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, TypeDecorator\nfrom sqlalchemy.types import JSON as _JSON\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Dialect\n\nBigIntIdentity = BigInteger().with_variant(Integer, \"sqlite\")\n\n\nclass GUID(TypeDecorator):\n \"\"\"Platform-independent GUID type.\n\n Uses PostgreSQL's UUID type, Oracle's RAW(16) type, otherwise uses\n BINARY(16) or CHAR(32), storing as stringified hex values.\n\n Will accept stringified UUIDs as a hexstring or an actual UUID\n\n \"\"\"\n\n impl = BINARY(16)\n cache_ok = True\n\n @property\n def python_type(self) -> type[uuid.UUID]:\n return uuid.UUID\n\n def __init__(self, *args: Any, binary: bool = True, **kwargs: Any) -> None:\n self.binary = binary\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return dialect.type_descriptor(PG_UUID())\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_RAW(16))\n if self.binary:\n return dialect.type_descriptor(BINARY(16))\n return dialect.type_descriptor(CHAR(32))\n\n def process_bind_param(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> bytes | str | None:\n if value is None:\n return value\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return str(value)\n value = self.to_uuid(value)\n if value is None:\n return value\n if dialect.name in {\"oracle\", \"spanner+spanner\"}:\n return value.bytes\n return value.bytes if self.binary else value.hex\n\n def process_result_value(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> uuid.UUID | None:\n if value is None:\n return value\n if isinstance(value, uuid.UUID):\n return value\n if dialect.name == \"spanner+spanner\":\n return uuid.UUID(bytes=b64decode(value))\n if self.binary:\n return uuid.UUID(bytes=cast(\"bytes\", value))\n return uuid.UUID(hex=cast(\"str\", value))\n\n @staticmethod\n def to_uuid(value: Any) -> uuid.UUID | None:\n if isinstance(value, uuid.UUID) or value is None:\n return value\n try:\n value = uuid.UUID(hex=value)\n except (TypeError, ValueError):\n value = uuid.UUID(bytes=value)\n return cast(\"uuid.UUID | None\", value)\n\n\nclass JSON(TypeDecorator):\n \"\"\"Platform-independent JSON type.\n\n Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.\n\n JSON = _JSON().with_variant(PG_JSONB, \"postgresql\").with_variant(ORA_BLOB, \"oracle\")\n\n \"\"\"\n\n impl = _JSON\n cache_ok = True\n\n @property\n def python_type(self) -> type[dict]:\n return dict\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize JSON type\"\"\"\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name == \"postgresql\":\n return dialect.type_descriptor(PG_JSONB()) # type: ignore\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_BLOB())\n return dialect.type_descriptor(_JSON())\n", "path": 
"litestar/contrib/sqlalchemy/types.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport uuid\nfrom base64 import b64decode\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom sqlalchemy import text, util\nfrom sqlalchemy.dialects.oracle import BLOB as ORA_BLOB\nfrom sqlalchemy.dialects.oracle import RAW as ORA_RAW\nfrom sqlalchemy.dialects.postgresql import JSONB as PG_JSONB\nfrom sqlalchemy.dialects.postgresql import UUID as PG_UUID\nfrom sqlalchemy.types import BINARY, CHAR, BigInteger, Integer, SchemaType, TypeDecorator\nfrom sqlalchemy.types import JSON as _JSON\n\nif TYPE_CHECKING:\n from sqlalchemy.engine import Dialect\n\nBigIntIdentity = BigInteger().with_variant(Integer, \"sqlite\")\n\n\nclass GUID(TypeDecorator):\n \"\"\"Platform-independent GUID type.\n\n Uses PostgreSQL's UUID type, Oracle's RAW(16) type, otherwise uses\n BINARY(16) or CHAR(32), storing as stringified hex values.\n\n Will accept stringified UUIDs as a hexstring or an actual UUID\n\n \"\"\"\n\n impl = BINARY(16)\n cache_ok = True\n\n @property\n def python_type(self) -> type[uuid.UUID]:\n return uuid.UUID\n\n def __init__(self, *args: Any, binary: bool = True, **kwargs: Any) -> None:\n self.binary = binary\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return dialect.type_descriptor(PG_UUID())\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_RAW(16))\n if self.binary:\n return dialect.type_descriptor(BINARY(16))\n return dialect.type_descriptor(CHAR(32))\n\n def process_bind_param(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> bytes | str | None:\n if value is None:\n return value\n if dialect.name in {\"postgresql\", \"duckdb\"}:\n return str(value)\n value = self.to_uuid(value)\n if value is None:\n return value\n if dialect.name in {\"oracle\", \"spanner+spanner\"}:\n return value.bytes\n return value.bytes if self.binary else value.hex\n\n def process_result_value(self, value: bytes | str | uuid.UUID | None, dialect: Dialect) -> uuid.UUID | None:\n if value is None:\n return value\n if isinstance(value, uuid.UUID):\n return value\n if dialect.name == \"spanner+spanner\":\n return uuid.UUID(bytes=b64decode(value))\n if self.binary:\n return uuid.UUID(bytes=cast(\"bytes\", value))\n return uuid.UUID(hex=cast(\"str\", value))\n\n @staticmethod\n def to_uuid(value: Any) -> uuid.UUID | None:\n if isinstance(value, uuid.UUID) or value is None:\n return value\n try:\n value = uuid.UUID(hex=value)\n except (TypeError, ValueError):\n value = uuid.UUID(bytes=value)\n return cast(\"uuid.UUID | None\", value)\n\n\nclass JSON(TypeDecorator, SchemaType): # type: ignore\n \"\"\"Platform-independent JSON type.\n\n Uses JSONB type for postgres, BLOB for Oracle, otherwise uses the generic JSON data type.\n\n JSON = _JSON().with_variant(PG_JSONB, \"postgresql\").with_variant(ORA_BLOB, \"oracle\")\n\n \"\"\"\n\n impl = _JSON\n cache_ok = True\n\n @property\n def python_type(self) -> type[dict]:\n return dict\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize JSON type\"\"\"\n self.name = kwargs.pop(\"name\", None)\n self.oracle_strict = kwargs.pop(\"oracle_strict\", True)\n\n def load_dialect_impl(self, dialect: Dialect) -> Any:\n if dialect.name == \"postgresql\":\n return dialect.type_descriptor(PG_JSONB()) # type: ignore\n if dialect.name == \"oracle\":\n return dialect.type_descriptor(ORA_BLOB())\n return dialect.type_descriptor(_JSON())\n\n def 
_should_create_constraint(self, compiler: Any, **kw: Any) -> bool:\n return bool(compiler.dialect.name == \"oracle\")\n\n def _variant_mapping_for_set_table(self, column: Any) -> dict | None:\n if column.type._variant_mapping:\n variant_mapping = dict(column.type._variant_mapping)\n variant_mapping[\"_default\"] = column.type\n else:\n variant_mapping = None\n return variant_mapping\n\n @util.preload_module(\"sqlalchemy.sql.schema\")\n def _set_table(self, column: Any, table: Any) -> None:\n schema = util.preloaded.sql_schema\n variant_mapping = self._variant_mapping_for_set_table(column)\n constraint_options = \"(strict)\" if self.oracle_strict else \"\"\n sqltext = text(f\"{column.name} is json {constraint_options}\")\n e = schema.CheckConstraint(\n sqltext,\n name=f\"{column.name}_is_json\",\n _create_rule=util.portable_instancemethod( # type: ignore[no-untyped-call]\n self._should_create_constraint,\n {\"variant_mapping\": variant_mapping},\n ),\n _type_bound=True,\n )\n table.append_constraint(e)\n", "path": "litestar/contrib/sqlalchemy/types.py"}]} | 1,496 | 681 |
gh_patches_debug_19016 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PS-1] RPC service `pull_embedding_vector` implementation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/ps/servicer.py`
Content:
```
1 import threading
2
3 from google.protobuf import empty_pb2
4
5 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
6 from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
7
8
9 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
10 """PS service implementation"""
11
12 def __init__(
13 self,
14 parameters,
15 grads_to_wait,
16 optimizer,
17 lr_staleness_modulation=False,
18 use_async=False,
19 ):
20 self._parameters = parameters
21 self._grads_to_wait = grads_to_wait
22 self._optimizer = optimizer
23 self._lr_staleness_modulation = lr_staleness_modulation
24 self._use_async = use_async
25 self._version = 0
26 self._lock = threading.Lock()
27
28 def pull_variable(self, request, _):
29 """
30 Response with all non-embedding parameters if initialized.
31 """
32 res = elasticdl_pb2.PullVariableResponse()
33 if not self._parameters.init_status:
34 res.model_init_status = False
35 return res
36
37 # Only sync-SGD needs lock
38 # TODO: use a read-write lock to support multiple concurrent reads
39 if not self._use_async:
40 self._lock.acquire()
41 res.model.version = self._parameters.version
42 for name, var in self._parameters.non_embedding_params.items():
43 tensor = res.model.param.add()
44 tensor.name = name
45 tensor.dim.extend(var.shape.as_list())
46 var_values = var.numpy()
47 tensor.content = var_values.tobytes()
48 tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)
49 if not self._use_async:
50 self._lock.release()
51 res.model_init_status = True
52 return res
53
54 def pull_embedding_vector(self, request, _):
55 # TODO: implement this RPC service
56 return elasticdl_pb2.Tensor()
57
58 def push_model(self, request, _):
59 with self._lock:
60 self._parameters.init_from_model_pb(request)
61 return empty_pb2.Empty()
62
63 def push_gradient(self, request, _):
64 # TODO: implement this RPC service
65 return elasticdl_pb2.PushGradientResponse()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -4,6 +4,7 @@
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
from elasticdl.python.common.dtypes import dtype_numpy_to_tensor
+from elasticdl.python.common.tensor import Tensor, serialize_tensor
class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
@@ -52,8 +53,15 @@
return res
def pull_embedding_vector(self, request, _):
- # TODO: implement this RPC service
- return elasticdl_pb2.Tensor()
+ ret = elasticdl_pb2.Tensor()
+ if not request.ids:
+ return ret
+ embedding_vectors = self._parameters.get_embedding_param(
+ request.name, request.ids
+ )
+ tensor = Tensor(values=embedding_vectors)
+ serialize_tensor(tensor, ret)
+ return ret
def push_model(self, request, _):
with self._lock:
| {"golden_diff": "diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -4,6 +4,7 @@\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n from elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n+from elasticdl.python.common.tensor import Tensor, serialize_tensor\n \n \n class PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n@@ -52,8 +53,15 @@\n return res\n \n def pull_embedding_vector(self, request, _):\n- # TODO: implement this RPC service\n- return elasticdl_pb2.Tensor()\n+ ret = elasticdl_pb2.Tensor()\n+ if not request.ids:\n+ return ret\n+ embedding_vectors = self._parameters.get_embedding_param(\n+ request.name, request.ids\n+ )\n+ tensor = Tensor(values=embedding_vectors)\n+ serialize_tensor(tensor, ret)\n+ return ret\n \n def push_model(self, request, _):\n with self._lock:\n", "issue": "[PS-1] RPC service `pull_embedding_vector` implementation\n\n", "before_files": [{"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.Tensor()\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}], "after_files": [{"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\nfrom elasticdl.python.common.dtypes import dtype_numpy_to_tensor\nfrom elasticdl.python.common.tensor import Tensor, serialize_tensor\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = 
optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n \"\"\"\n Response with all non-embedding parameters if initialized.\n \"\"\"\n res = elasticdl_pb2.PullVariableResponse()\n if not self._parameters.init_status:\n res.model_init_status = False\n return res\n\n # Only sync-SGD needs lock\n # TODO: use a read-write lock to support multiple concurrent reads\n if not self._use_async:\n self._lock.acquire()\n res.model.version = self._parameters.version\n for name, var in self._parameters.non_embedding_params.items():\n tensor = res.model.param.add()\n tensor.name = name\n tensor.dim.extend(var.shape.as_list())\n var_values = var.numpy()\n tensor.content = var_values.tobytes()\n tensor.dtype = dtype_numpy_to_tensor(var_values.dtype)\n if not self._use_async:\n self._lock.release()\n res.model_init_status = True\n return res\n\n def pull_embedding_vector(self, request, _):\n ret = elasticdl_pb2.Tensor()\n if not request.ids:\n return ret\n embedding_vectors = self._parameters.get_embedding_param(\n request.name, request.ids\n )\n tensor = Tensor(values=embedding_vectors)\n serialize_tensor(tensor, ret)\n return ret\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]} | 864 | 255 |
gh_patches_debug_37115 | rasdani/github-patches | git_diff | SCons__scons-3697 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CompilationDatabase tool emits a field called `target` which should be called `output`
**Describe the bug**
With the new integrated SCons compilation database support, there is a field called `target` in the resulting file:
```
"target": "build/cached/third_party/s2/base/int128.dyn.o"
```
Per the compilation database documentation, it seems this field should instead be called `output`:
https://clang.llvm.org/docs/JSONCompilationDatabase.html#format
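Following that format, the same entry would presumably read:

```
"output": "build/cached/third_party/s2/base/int128.dyn.o"
```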
**Required information**
* Link to SCons Users thread discussing your issue.
Discussed directly with @bdbaddog
* Version of SCons
master
* Version of Python
3.7
* Which python distribution if applicable (python.org, cygwin, anaconda, macports, brew, etc.)
* How you installed SCons
Using it from source
* What Platform are you on? (Linux/Windows and which version)
macOS, but it shouldn't matter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `SCons/Tool/compilation_db.py`
Content:
```
1 """
2 Implements the ability for SCons to emit a compilation database for the MongoDB project. See
3 http://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation
4 database is, and why you might want one. The only user visible entry point here is
5 'env.CompilationDatabase'. This method takes an optional 'target' to name the file that
6 should hold the compilation database, otherwise, the file defaults to compile_commands.json,
7 which is the name that most clang tools search for by default.
8 """
9
10 # Copyright 2020 MongoDB Inc.
11 #
12 # Permission is hereby granted, free of charge, to any person obtaining
13 # a copy of this software and associated documentation files (the
14 # "Software"), to deal in the Software without restriction, including
15 # without limitation the rights to use, copy, modify, merge, publish,
16 # distribute, sublicense, and/or sell copies of the Software, and to
17 # permit persons to whom the Software is furnished to do so, subject to
18 # the following conditions:
19 #
20 # The above copyright notice and this permission notice shall be included
21 # in all copies or substantial portions of the Software.
22 #
23 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
24 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
25 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
27 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
28 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
29 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #
31
32 import json
33 import itertools
34 import SCons
35
36 from .cxx import CXXSuffixes
37 from .cc import CSuffixes
38 from .asm import ASSuffixes, ASPPSuffixes
39
40 # TODO: Is there a better way to do this than this global? Right now this exists so that the
41 # emitter we add can record all of the things it emits, so that the scanner for the top level
42 # compilation database can access the complete list, and also so that the writer has easy
43 # access to write all of the files. But it seems clunky. How can the emitter and the scanner
44 # communicate more gracefully?
45 __COMPILATION_DB_ENTRIES = []
46
47
48 # We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even
49 # integrate with the cache, but there doesn't seem to be much call for it.
50 class __CompilationDbNode(SCons.Node.Python.Value):
51 def __init__(self, value):
52 SCons.Node.Python.Value.__init__(self, value)
53 self.Decider(changed_since_last_build_node)
54
55
56 def changed_since_last_build_node(child, target, prev_ni, node):
57 """ Dummy decider to force always building"""
58 return True
59
60
61 def make_emit_compilation_DB_entry(comstr):
62 """
63 Effectively this creates a lambda function to capture:
64 * command line
65 * source
66 * target
67 :param comstr: unevaluated command line
68 :return: an emitter which has captured the above
69 """
70 user_action = SCons.Action.Action(comstr)
71
72 def emit_compilation_db_entry(target, source, env):
73 """
74 This emitter will be added to each c/c++ object build to capture the info needed
75 for clang tools
76 :param target: target node(s)
77 :param source: source node(s)
78 :param env: Environment for use building this node
79 :return: target(s), source(s)
80 """
81
82 dbtarget = __CompilationDbNode(source)
83
84 entry = env.__COMPILATIONDB_Entry(
85 target=dbtarget,
86 source=[],
87 __COMPILATIONDB_UTARGET=target,
88 __COMPILATIONDB_USOURCE=source,
89 __COMPILATIONDB_UACTION=user_action,
90 __COMPILATIONDB_ENV=env,
91 )
92
93 # TODO: Technically, these next two lines should not be required: it should be fine to
94 # cache the entries. However, they don't seem to update properly. Since they are quick
95 # to re-generate disable caching and sidestep this problem.
96 env.AlwaysBuild(entry)
97 env.NoCache(entry)
98
99 __COMPILATION_DB_ENTRIES.append(dbtarget)
100
101 return target, source
102
103 return emit_compilation_db_entry
104
105
106 def compilation_db_entry_action(target, source, env, **kw):
107 """
108 Create a dictionary with evaluated command line, target, source
109 and store that info as an attribute on the target
110 (Which has been stored in __COMPILATION_DB_ENTRIES array
111 :param target: target node(s)
112 :param source: source node(s)
113 :param env: Environment for use building this node
114 :param kw:
115 :return: None
116 """
117
118 command = env["__COMPILATIONDB_UACTION"].strfunction(
119 target=env["__COMPILATIONDB_UTARGET"],
120 source=env["__COMPILATIONDB_USOURCE"],
121 env=env["__COMPILATIONDB_ENV"],
122 )
123
124 entry = {
125 "directory": env.Dir("#").abspath,
126 "command": command,
127 "file": env["__COMPILATIONDB_USOURCE"][0],
128 "target": env['__COMPILATIONDB_UTARGET'][0]
129 }
130
131 target[0].write(entry)
132
133
134 def write_compilation_db(target, source, env):
135 entries = []
136
137 use_abspath = env['COMPILATIONDB_USE_ABSPATH'] in [True, 1, 'True', 'true']
138
139 for s in __COMPILATION_DB_ENTRIES:
140 entry = s.read()
141 source_file = entry['file']
142 target_file = entry['target']
143
144 if use_abspath:
145 source_file = source_file.abspath
146 target_file = target_file.abspath
147 else:
148 source_file = source_file.path
149 target_file = target_file.path
150
151 path_entry = {'directory': entry['directory'],
152 'command': entry['command'],
153 'file': source_file,
154 'target': target_file}
155
156 entries.append(path_entry)
157
158 with open(target[0].path, "w") as target_file:
159 json.dump(
160 entries, target_file, sort_keys=True, indent=4, separators=(",", ": ")
161 )
162
163
164 def scan_compilation_db(node, env, path):
165 return __COMPILATION_DB_ENTRIES
166
167
168 def compilation_db_emitter(target, source, env):
169 """ fix up the source/targets """
170
171 # Someone called env.CompilationDatabase('my_targetname.json')
172 if not target and len(source) == 1:
173 target = source
174
175 # Default target name is compilation_db.json
176 if not target:
177 target = ['compile_commands.json', ]
178
179 # No source should have been passed. Drop it.
180 if source:
181 source = []
182
183 return target, source
184
185
186 def generate(env, **kwargs):
187 static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
188
189 env["COMPILATIONDB_COMSTR"] = kwargs.get(
190 "COMPILATIONDB_COMSTR", "Building compilation database $TARGET"
191 )
192
193 components_by_suffix = itertools.chain(
194 itertools.product(
195 CSuffixes,
196 [
197 (static_obj, SCons.Defaults.StaticObjectEmitter, "$CCCOM"),
198 (shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCCCOM"),
199 ],
200 ),
201 itertools.product(
202 CXXSuffixes,
203 [
204 (static_obj, SCons.Defaults.StaticObjectEmitter, "$CXXCOM"),
205 (shared_obj, SCons.Defaults.SharedObjectEmitter, "$SHCXXCOM"),
206 ],
207 ),
208 itertools.product(
209 ASSuffixes,
210 [(static_obj, SCons.Defaults.StaticObjectEmitter, "$ASCOM")],
211 [(shared_obj, SCons.Defaults.SharedObjectEmitter, "$ASCOM")],
212 ),
213 itertools.product(
214 ASPPSuffixes,
215 [(static_obj, SCons.Defaults.StaticObjectEmitter, "$ASPPCOM")],
216 [(shared_obj, SCons.Defaults.SharedObjectEmitter, "$ASPPCOM")],
217 ),
218 )
219
220 for entry in components_by_suffix:
221 suffix = entry[0]
222 builder, base_emitter, command = entry[1]
223
224 # Assumes a dictionary emitter
225 emitter = builder.emitter.get(suffix, False)
226 if emitter:
227 # We may not have tools installed which initialize all or any of
228 # cxx, cc, or assembly. If not skip resetting the respective emitter.
229 builder.emitter[suffix] = SCons.Builder.ListEmitter(
230 [emitter, make_emit_compilation_DB_entry(command), ]
231 )
232
233 env["BUILDERS"]["__COMPILATIONDB_Entry"] = SCons.Builder.Builder(
234 action=SCons.Action.Action(compilation_db_entry_action, None),
235 )
236
237 env["BUILDERS"]["CompilationDatabase"] = SCons.Builder.Builder(
238 action=SCons.Action.Action(write_compilation_db, "$COMPILATIONDB_COMSTR"),
239 target_scanner=SCons.Scanner.Scanner(
240 function=scan_compilation_db, node_class=None
241 ),
242 emitter=compilation_db_emitter,
243 suffix='json',
244 )
245
246 env['COMPILATIONDB_USE_ABSPATH'] = False
247
248
249 def exists(env):
250 return True
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/SCons/Tool/compilation_db.py b/SCons/Tool/compilation_db.py
--- a/SCons/Tool/compilation_db.py
+++ b/SCons/Tool/compilation_db.py
@@ -84,7 +84,7 @@
entry = env.__COMPILATIONDB_Entry(
target=dbtarget,
source=[],
- __COMPILATIONDB_UTARGET=target,
+ __COMPILATIONDB_UOUTPUT=target,
__COMPILATIONDB_USOURCE=source,
__COMPILATIONDB_UACTION=user_action,
__COMPILATIONDB_ENV=env,
@@ -116,7 +116,7 @@
"""
command = env["__COMPILATIONDB_UACTION"].strfunction(
- target=env["__COMPILATIONDB_UTARGET"],
+ target=env["__COMPILATIONDB_UOUTPUT"],
source=env["__COMPILATIONDB_USOURCE"],
env=env["__COMPILATIONDB_ENV"],
)
@@ -125,7 +125,7 @@
"directory": env.Dir("#").abspath,
"command": command,
"file": env["__COMPILATIONDB_USOURCE"][0],
- "target": env['__COMPILATIONDB_UTARGET'][0]
+ "output": env['__COMPILATIONDB_UOUTPUT'][0]
}
target[0].write(entry)
@@ -139,25 +139,25 @@
for s in __COMPILATION_DB_ENTRIES:
entry = s.read()
source_file = entry['file']
- target_file = entry['target']
+ output_file = entry['output']
if use_abspath:
source_file = source_file.abspath
- target_file = target_file.abspath
+ output_file = output_file.abspath
else:
source_file = source_file.path
- target_file = target_file.path
+ output_file = output_file.path
path_entry = {'directory': entry['directory'],
'command': entry['command'],
'file': source_file,
- 'target': target_file}
+ 'output': output_file}
entries.append(path_entry)
- with open(target[0].path, "w") as target_file:
+ with open(target[0].path, "w") as output_file:
json.dump(
- entries, target_file, sort_keys=True, indent=4, separators=(",", ": ")
+ entries, output_file, sort_keys=True, indent=4, separators=(",", ": ")
)
| {"golden_diff": "diff --git a/SCons/Tool/compilation_db.py b/SCons/Tool/compilation_db.py\n--- a/SCons/Tool/compilation_db.py\n+++ b/SCons/Tool/compilation_db.py\n@@ -84,7 +84,7 @@\n entry = env.__COMPILATIONDB_Entry(\n target=dbtarget,\n source=[],\n- __COMPILATIONDB_UTARGET=target,\n+ __COMPILATIONDB_UOUTPUT=target,\n __COMPILATIONDB_USOURCE=source,\n __COMPILATIONDB_UACTION=user_action,\n __COMPILATIONDB_ENV=env,\n@@ -116,7 +116,7 @@\n \"\"\"\n \n command = env[\"__COMPILATIONDB_UACTION\"].strfunction(\n- target=env[\"__COMPILATIONDB_UTARGET\"],\n+ target=env[\"__COMPILATIONDB_UOUTPUT\"],\n source=env[\"__COMPILATIONDB_USOURCE\"],\n env=env[\"__COMPILATIONDB_ENV\"],\n )\n@@ -125,7 +125,7 @@\n \"directory\": env.Dir(\"#\").abspath,\n \"command\": command,\n \"file\": env[\"__COMPILATIONDB_USOURCE\"][0],\n- \"target\": env['__COMPILATIONDB_UTARGET'][0]\n+ \"output\": env['__COMPILATIONDB_UOUTPUT'][0]\n }\n \n target[0].write(entry)\n@@ -139,25 +139,25 @@\n for s in __COMPILATION_DB_ENTRIES:\n entry = s.read()\n source_file = entry['file']\n- target_file = entry['target']\n+ output_file = entry['output']\n \n if use_abspath:\n source_file = source_file.abspath\n- target_file = target_file.abspath\n+ output_file = output_file.abspath\n else:\n source_file = source_file.path\n- target_file = target_file.path\n+ output_file = output_file.path\n \n path_entry = {'directory': entry['directory'],\n 'command': entry['command'],\n 'file': source_file,\n- 'target': target_file}\n+ 'output': output_file}\n \n entries.append(path_entry)\n \n- with open(target[0].path, \"w\") as target_file:\n+ with open(target[0].path, \"w\") as output_file:\n json.dump(\n- entries, target_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n+ entries, output_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n", "issue": "CompilationDatabase tool emits a field called `target` which should be called `output`\n**Describe the bug**\r\nWith the new integrated SCons compilation database support, there is a field called `target` in the resulting file:\r\n\r\n```\r\n\"target\": \"build/cached/third_party/s2/base/int128.dyn.o\"\r\n```\r\n\r\nPer the compilation database documentation, it seems this field should instead be called `output`:\r\n\r\nhttps://clang.llvm.org/docs/JSONCompilationDatabase.html#format\r\n\r\n\r\n**Required information**\r\n* Link to SCons Users thread discussing your issue.\r\nDiscussed directly with @bdbaddog \r\n\r\n* Version of SCons\r\nmaster\r\n\r\n* Version of Python\r\n3.7\r\n\r\n* Which python distribution if applicable (python.org, cygwin, anaconda, macports, brew,etc)\r\n* How you installed SCons\r\nUsing it from source\r\n\r\n* What Platform are you on? (Linux/Windows and which version)\r\nmacOS, but it shouldn't matter.\r\n\n", "before_files": [{"content": "\"\"\"\nImplements the ability for SCons to emit a compilation database for the MongoDB project. See\nhttp://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation\ndatabase is, and why you might want one. The only user visible entry point here is\n'env.CompilationDatabase'. 
This method takes an optional 'target' to name the file that\nshould hold the compilation database, otherwise, the file defaults to compile_commands.json,\nwhich is the name that most clang tools search for by default.\n\"\"\"\n\n# Copyright 2020 MongoDB Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nimport json\nimport itertools\nimport SCons\n\nfrom .cxx import CXXSuffixes\nfrom .cc import CSuffixes\nfrom .asm import ASSuffixes, ASPPSuffixes\n\n# TODO: Is there a better way to do this than this global? Right now this exists so that the\n# emitter we add can record all of the things it emits, so that the scanner for the top level\n# compilation database can access the complete list, and also so that the writer has easy\n# access to write all of the files. But it seems clunky. How can the emitter and the scanner\n# communicate more gracefully?\n__COMPILATION_DB_ENTRIES = []\n\n\n# We make no effort to avoid rebuilding the entries. Someday, perhaps we could and even\n# integrate with the cache, but there doesn't seem to be much call for it.\nclass __CompilationDbNode(SCons.Node.Python.Value):\n def __init__(self, value):\n SCons.Node.Python.Value.__init__(self, value)\n self.Decider(changed_since_last_build_node)\n\n\ndef changed_since_last_build_node(child, target, prev_ni, node):\n \"\"\" Dummy decider to force always building\"\"\"\n return True\n\n\ndef make_emit_compilation_DB_entry(comstr):\n \"\"\"\n Effectively this creates a lambda function to capture:\n * command line\n * source\n * target\n :param comstr: unevaluated command line\n :return: an emitter which has captured the above\n \"\"\"\n user_action = SCons.Action.Action(comstr)\n\n def emit_compilation_db_entry(target, source, env):\n \"\"\"\n This emitter will be added to each c/c++ object build to capture the info needed\n for clang tools\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :return: target(s), source(s)\n \"\"\"\n\n dbtarget = __CompilationDbNode(source)\n\n entry = env.__COMPILATIONDB_Entry(\n target=dbtarget,\n source=[],\n __COMPILATIONDB_UTARGET=target,\n __COMPILATIONDB_USOURCE=source,\n __COMPILATIONDB_UACTION=user_action,\n __COMPILATIONDB_ENV=env,\n )\n\n # TODO: Technically, these next two lines should not be required: it should be fine to\n # cache the entries. However, they don't seem to update properly. 
Since they are quick\n # to re-generate disable caching and sidestep this problem.\n env.AlwaysBuild(entry)\n env.NoCache(entry)\n\n __COMPILATION_DB_ENTRIES.append(dbtarget)\n\n return target, source\n\n return emit_compilation_db_entry\n\n\ndef compilation_db_entry_action(target, source, env, **kw):\n \"\"\"\n Create a dictionary with evaluated command line, target, source\n and store that info as an attribute on the target\n (Which has been stored in __COMPILATION_DB_ENTRIES array\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :param kw:\n :return: None\n \"\"\"\n\n command = env[\"__COMPILATIONDB_UACTION\"].strfunction(\n target=env[\"__COMPILATIONDB_UTARGET\"],\n source=env[\"__COMPILATIONDB_USOURCE\"],\n env=env[\"__COMPILATIONDB_ENV\"],\n )\n\n entry = {\n \"directory\": env.Dir(\"#\").abspath,\n \"command\": command,\n \"file\": env[\"__COMPILATIONDB_USOURCE\"][0],\n \"target\": env['__COMPILATIONDB_UTARGET'][0]\n }\n\n target[0].write(entry)\n\n\ndef write_compilation_db(target, source, env):\n entries = []\n\n use_abspath = env['COMPILATIONDB_USE_ABSPATH'] in [True, 1, 'True', 'true']\n\n for s in __COMPILATION_DB_ENTRIES:\n entry = s.read()\n source_file = entry['file']\n target_file = entry['target']\n\n if use_abspath:\n source_file = source_file.abspath\n target_file = target_file.abspath\n else:\n source_file = source_file.path\n target_file = target_file.path\n\n path_entry = {'directory': entry['directory'],\n 'command': entry['command'],\n 'file': source_file,\n 'target': target_file}\n\n entries.append(path_entry)\n\n with open(target[0].path, \"w\") as target_file:\n json.dump(\n entries, target_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n\n\ndef scan_compilation_db(node, env, path):\n return __COMPILATION_DB_ENTRIES\n\n\ndef compilation_db_emitter(target, source, env):\n \"\"\" fix up the source/targets \"\"\"\n\n # Someone called env.CompilationDatabase('my_targetname.json')\n if not target and len(source) == 1:\n target = source\n\n # Default target name is compilation_db.json\n if not target:\n target = ['compile_commands.json', ]\n\n # No source should have been passed. Drop it.\n if source:\n source = []\n\n return target, source\n\n\ndef generate(env, **kwargs):\n static_obj, shared_obj = SCons.Tool.createObjBuilders(env)\n\n env[\"COMPILATIONDB_COMSTR\"] = kwargs.get(\n \"COMPILATIONDB_COMSTR\", \"Building compilation database $TARGET\"\n )\n\n components_by_suffix = itertools.chain(\n itertools.product(\n CSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CCCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCCCOM\"),\n ],\n ),\n itertools.product(\n CXXSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CXXCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCXXCOM\"),\n ],\n ),\n itertools.product(\n ASSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASCOM\")],\n ),\n itertools.product(\n ASPPSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASPPCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASPPCOM\")],\n ),\n )\n\n for entry in components_by_suffix:\n suffix = entry[0]\n builder, base_emitter, command = entry[1]\n\n # Assumes a dictionary emitter\n emitter = builder.emitter.get(suffix, False)\n if emitter:\n # We may not have tools installed which initialize all or any of\n # cxx, cc, or assembly. 
If not skip resetting the respective emitter.\n builder.emitter[suffix] = SCons.Builder.ListEmitter(\n [emitter, make_emit_compilation_DB_entry(command), ]\n )\n\n env[\"BUILDERS\"][\"__COMPILATIONDB_Entry\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(compilation_db_entry_action, None),\n )\n\n env[\"BUILDERS\"][\"CompilationDatabase\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(write_compilation_db, \"$COMPILATIONDB_COMSTR\"),\n target_scanner=SCons.Scanner.Scanner(\n function=scan_compilation_db, node_class=None\n ),\n emitter=compilation_db_emitter,\n suffix='json',\n )\n\n env['COMPILATIONDB_USE_ABSPATH'] = False\n\n\ndef exists(env):\n return True\n", "path": "SCons/Tool/compilation_db.py"}], "after_files": [{"content": "\"\"\"\nImplements the ability for SCons to emit a compilation database for the MongoDB project. See\nhttp://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation\ndatabase is, and why you might want one. The only user visible entry point here is\n'env.CompilationDatabase'. This method takes an optional 'target' to name the file that\nshould hold the compilation database, otherwise, the file defaults to compile_commands.json,\nwhich is the name that most clang tools search for by default.\n\"\"\"\n\n# Copyright 2020 MongoDB Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\nimport json\nimport itertools\nimport SCons\n\nfrom .cxx import CXXSuffixes\nfrom .cc import CSuffixes\nfrom .asm import ASSuffixes, ASPPSuffixes\n\n# TODO: Is there a better way to do this than this global? Right now this exists so that the\n# emitter we add can record all of the things it emits, so that the scanner for the top level\n# compilation database can access the complete list, and also so that the writer has easy\n# access to write all of the files. But it seems clunky. How can the emitter and the scanner\n# communicate more gracefully?\n__COMPILATION_DB_ENTRIES = []\n\n\n# We make no effort to avoid rebuilding the entries. 
Someday, perhaps we could and even\n# integrate with the cache, but there doesn't seem to be much call for it.\nclass __CompilationDbNode(SCons.Node.Python.Value):\n def __init__(self, value):\n SCons.Node.Python.Value.__init__(self, value)\n self.Decider(changed_since_last_build_node)\n\n\ndef changed_since_last_build_node(child, target, prev_ni, node):\n \"\"\" Dummy decider to force always building\"\"\"\n return True\n\n\ndef make_emit_compilation_DB_entry(comstr):\n \"\"\"\n Effectively this creates a lambda function to capture:\n * command line\n * source\n * target\n :param comstr: unevaluated command line\n :return: an emitter which has captured the above\n \"\"\"\n user_action = SCons.Action.Action(comstr)\n\n def emit_compilation_db_entry(target, source, env):\n \"\"\"\n This emitter will be added to each c/c++ object build to capture the info needed\n for clang tools\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :return: target(s), source(s)\n \"\"\"\n\n dbtarget = __CompilationDbNode(source)\n\n entry = env.__COMPILATIONDB_Entry(\n target=dbtarget,\n source=[],\n __COMPILATIONDB_UOUTPUT=target,\n __COMPILATIONDB_USOURCE=source,\n __COMPILATIONDB_UACTION=user_action,\n __COMPILATIONDB_ENV=env,\n )\n\n # TODO: Technically, these next two lines should not be required: it should be fine to\n # cache the entries. However, they don't seem to update properly. Since they are quick\n # to re-generate disable caching and sidestep this problem.\n env.AlwaysBuild(entry)\n env.NoCache(entry)\n\n __COMPILATION_DB_ENTRIES.append(dbtarget)\n\n return target, source\n\n return emit_compilation_db_entry\n\n\ndef compilation_db_entry_action(target, source, env, **kw):\n \"\"\"\n Create a dictionary with evaluated command line, target, source\n and store that info as an attribute on the target\n (Which has been stored in __COMPILATION_DB_ENTRIES array\n :param target: target node(s)\n :param source: source node(s)\n :param env: Environment for use building this node\n :param kw:\n :return: None\n \"\"\"\n\n command = env[\"__COMPILATIONDB_UACTION\"].strfunction(\n target=env[\"__COMPILATIONDB_UOUTPUT\"],\n source=env[\"__COMPILATIONDB_USOURCE\"],\n env=env[\"__COMPILATIONDB_ENV\"],\n )\n\n entry = {\n \"directory\": env.Dir(\"#\").abspath,\n \"command\": command,\n \"file\": env[\"__COMPILATIONDB_USOURCE\"][0],\n \"output\": env['__COMPILATIONDB_UOUTPUT'][0]\n }\n\n target[0].write(entry)\n\n\ndef write_compilation_db(target, source, env):\n entries = []\n\n use_abspath = env['COMPILATIONDB_USE_ABSPATH'] in [True, 1, 'True', 'true']\n\n for s in __COMPILATION_DB_ENTRIES:\n entry = s.read()\n source_file = entry['file']\n output_file = entry['output']\n\n if use_abspath:\n source_file = source_file.abspath\n output_file = output_file.abspath\n else:\n source_file = source_file.path\n output_file = output_file.path\n\n path_entry = {'directory': entry['directory'],\n 'command': entry['command'],\n 'file': source_file,\n 'output': output_file}\n\n entries.append(path_entry)\n\n with open(target[0].path, \"w\") as output_file:\n json.dump(\n entries, output_file, sort_keys=True, indent=4, separators=(\",\", \": \")\n )\n\n\ndef scan_compilation_db(node, env, path):\n return __COMPILATION_DB_ENTRIES\n\n\ndef compilation_db_emitter(target, source, env):\n \"\"\" fix up the source/targets \"\"\"\n\n # Someone called env.CompilationDatabase('my_targetname.json')\n if not target and len(source) == 1:\n target = source\n\n # Default 
target name is compilation_db.json\n if not target:\n target = ['compile_commands.json', ]\n\n # No source should have been passed. Drop it.\n if source:\n source = []\n\n return target, source\n\n\ndef generate(env, **kwargs):\n static_obj, shared_obj = SCons.Tool.createObjBuilders(env)\n\n env[\"COMPILATIONDB_COMSTR\"] = kwargs.get(\n \"COMPILATIONDB_COMSTR\", \"Building compilation database $TARGET\"\n )\n\n components_by_suffix = itertools.chain(\n itertools.product(\n CSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CCCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCCCOM\"),\n ],\n ),\n itertools.product(\n CXXSuffixes,\n [\n (static_obj, SCons.Defaults.StaticObjectEmitter, \"$CXXCOM\"),\n (shared_obj, SCons.Defaults.SharedObjectEmitter, \"$SHCXXCOM\"),\n ],\n ),\n itertools.product(\n ASSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASCOM\")],\n ),\n itertools.product(\n ASPPSuffixes,\n [(static_obj, SCons.Defaults.StaticObjectEmitter, \"$ASPPCOM\")],\n [(shared_obj, SCons.Defaults.SharedObjectEmitter, \"$ASPPCOM\")],\n ),\n )\n\n for entry in components_by_suffix:\n suffix = entry[0]\n builder, base_emitter, command = entry[1]\n\n # Assumes a dictionary emitter\n emitter = builder.emitter.get(suffix, False)\n if emitter:\n # We may not have tools installed which initialize all or any of\n # cxx, cc, or assembly. If not skip resetting the respective emitter.\n builder.emitter[suffix] = SCons.Builder.ListEmitter(\n [emitter, make_emit_compilation_DB_entry(command), ]\n )\n\n env[\"BUILDERS\"][\"__COMPILATIONDB_Entry\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(compilation_db_entry_action, None),\n )\n\n env[\"BUILDERS\"][\"CompilationDatabase\"] = SCons.Builder.Builder(\n action=SCons.Action.Action(write_compilation_db, \"$COMPILATIONDB_COMSTR\"),\n target_scanner=SCons.Scanner.Scanner(\n function=scan_compilation_db, node_class=None\n ),\n emitter=compilation_db_emitter,\n suffix='json',\n )\n\n env['COMPILATIONDB_USE_ABSPATH'] = False\n\n\ndef exists(env):\n return True\n", "path": "SCons/Tool/compilation_db.py"}]} | 3,123 | 563 |
gh_patches_debug_40818 | rasdani/github-patches | git_diff | UTNkar__moore-195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use person number for appoint->overturn field
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Prerequisites
* [X] Put an X between the brackets on this line if you have done all of the
following:
* Reproduced the problem with clear cache.
    * (If running the application locally:) Made sure you're running the newest version on the development branch
* Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues
### Description
Currently the `appoint` view of `Position` has an `overturn` field that works with usernames. The username(s) entered will be appointed to the position, whether or not they applied.
_Problem_: Within the old system a similar feature was available, but it was by person number. According to the management team this is preferable.
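In practice the overturn input would then be a comma-separated list of person numbers rather than usernames, for example (illustrative values only, in whatever format the site's person number field accepts):

```
19900101-1234, 19851231-5678
```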
### Steps to Reproduce
1. Create new `Position` with past deadline
2. Click `appoint` in the overview
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/involvement/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.utils.translation import ugettext_lazy as _
4
5 from involvement.models import Application, Reference
6 from utils.forms import AdvancedModelMultipleChoiceField
7
8
9 class ApplicationForm(forms.ModelForm):
10 class Meta:
11 model = Application
12 exclude = ['position', 'applicant']
13 widgets = {
14 'cover_letter': forms.Textarea(attrs={'style': 'height: 200px',
15 'class': 'form-control'}),
16 'qualifications': forms.Textarea(attrs={'style': 'height: 200px',
17 'class': 'form-control'}),
18 }
19
20 def clean_status(self):
21 status = self.cleaned_data['status']
22 if status not in ['draft', 'submitted'] \
23 or (self.initial['status'] == 'submitted'
24 and status == 'draft'):
25 raise forms.ValidationError(_('The submitted status was invalid.'))
26 return status
27
28
29 ReferenceFormSet = forms.inlineformset_factory(
30 Application,
31 Reference,
32 fields=('name', 'position', 'email', 'phone_number', 'comment'),
33 widgets={
34 'name': forms.TextInput(attrs={'class': 'form-control'}),
35 'position': forms.TextInput(attrs={'class': 'form-control'}),
36 'email': forms.TextInput(attrs={'class': 'form-control'}),
37 'phone_number': forms.TextInput(attrs={'class': 'form-control'}),
38 'comment': forms.TextInput(attrs={'class': 'form-control'}),
39 },
40 extra=0,
41 )
42
43
44 class ApprovalForm(forms.ModelForm):
45 status = forms.ChoiceField(
46 choices=(
47 ('submitted', '---------'),
48 ('approved', _('Approved')),
49 ('disapproved', _('Disapproved')),
50 ),
51 )
52
53 class Meta:
54 model = Application
55 fields = []
56
57 def clean_status(self):
58 status = self.cleaned_data['status']
59 if status not in ['submitted', 'approved', 'disapproved']:
60 raise forms.ValidationError(_('The submitted status was invalid.'))
61 return status
62
63 def save(self, commit=True):
64 self.instance.status = self.cleaned_data['status']
65
66 super(ApprovalForm, self).save(commit)
67
68
69 class AppointmentForm(forms.Form):
70 appoint = AdvancedModelMultipleChoiceField(
71 Application.objects.none(),
72 widget=forms.CheckboxSelectMultiple(),
73 required=False,
74 )
75 overturn = forms.CharField(
76 required=False,
77 label=_('Overturn'),
78 help_text=_('Enter a comma separated list of users you want to '
79 'appoint to the position, even though did not apply for '
80 'the position.')
81 )
82
83 def __init__(self, position, *args, **kwargs):
84 super(AppointmentForm, self).__init__(*args, **kwargs)
85 self.position = position
86 self.fields['appoint'].queryset = position.applications.filter(
87 status__in=['submitted', 'approved', 'appointed', 'turned_down']
88 )
89 self.initial['appoint'] = position.applications.filter(
90 status='appointed'
91 )
92
93 def clean_overturn(self):
94 string = self.cleaned_data['overturn']
95 string = string.replace(' ', '')
96 if string == '':
97 return []
98 else:
99 users = string.split(',')
100 for u in users:
101 if not get_user_model().objects.filter(
102 username=u
103 ).exists():
104 raise forms.ValidationError(
105 _('No user with the username %(user)s exists.'),
106 params={'user': u},
107 )
108 elif self.position.applications.filter(
109 applicant__username=u
110 ).exclude(
111 status='draft'
112 ).exists():
113 raise forms.ValidationError(
114 _('User %(user)s already applied for this position '
115 'and can not be appointed through the overturn '
116 'field.'),
117 params={'user': u},
118 )
119 return users
120
121 def clean(self):
122 super(AppointmentForm, self).clean()
123 appoint = self.cleaned_data.get('appoint', [])
124 overturn = self.cleaned_data.get('overturn', [])
125 nr_appointment = len(appoint) + len(overturn)
126 if nr_appointment > self.position.appointments:
127 raise forms.ValidationError(
128 _('You cannot appoint %(current)s applicants. The maximum '
129 'for this position is %(max)s.'),
130 params={
131 'current': nr_appointment,
132 'max': self.position.appointments,
133 },
134 )
135 return self.cleaned_data
136
137 def save(self):
138 for application in self.fields['appoint'].queryset:
139 if application in self.cleaned_data['appoint']:
140 application.status = 'appointed'
141 else:
142 application.status = 'turned_down'
143 application.save()
144
145 for user in self.cleaned_data['overturn']:
146 user = get_user_model().objects.get(
147 username=user
148 )
149 appl, created = Application.objects.get_or_create(
150 position=self.position,
151 applicant=user,
152 defaults={'status': 'appointed'}
153 )
154 if not created:
155 appl.status = 'appointed'
156 appl.save()
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/involvement/forms.py b/src/involvement/forms.py
--- a/src/involvement/forms.py
+++ b/src/involvement/forms.py
@@ -3,6 +3,7 @@
from django.utils.translation import ugettext_lazy as _
from involvement.models import Application, Reference
+from members.forms import PersonNumberField
from utils.forms import AdvancedModelMultipleChoiceField
@@ -75,9 +76,9 @@
overturn = forms.CharField(
required=False,
label=_('Overturn'),
- help_text=_('Enter a comma separated list of users you want to '
- 'appoint to the position, even though did not apply for '
- 'the position.')
+ help_text=_('Enter a comma separated list of person numbers you want '
+ 'to appoint to the position, even though did not apply for'
+ ' the position.')
)
def __init__(self, position, *args, **kwargs):
@@ -96,26 +97,35 @@
if string == '':
return []
else:
- users = string.split(',')
- for u in users:
+ pnrs = string.split(',')
+ users = []
+ for pnr in pnrs:
+ date, number = PersonNumberField().to_python(pnr)
if not get_user_model().objects.filter(
- username=u
+ birthday=date,
+ person_number_ext=number,
).exists():
raise forms.ValidationError(
- _('No user with the username %(user)s exists.'),
- params={'user': u},
+ _('No user with the person number %(pnr)s exists.'),
+ params={'pnr': pnr},
)
elif self.position.applications.filter(
- applicant__username=u
+ applicant__birthday=date,
+ applicant__person_number_ext=number,
).exclude(
status='draft'
).exists():
raise forms.ValidationError(
- _('User %(user)s already applied for this position '
- 'and can not be appointed through the overturn '
- 'field.'),
- params={'user': u},
+ _('User with person number %(pnr)s already applied for'
+ ' this position and can not be appointed through the'
+ ' overturn field.'),
+ params={'pnr': pnr},
)
+ else:
+ users.append(get_user_model().objects.get(
+ birthday=date,
+ person_number_ext=number,
+ ))
return users
def clean(self):
@@ -143,9 +153,6 @@
application.save()
for user in self.cleaned_data['overturn']:
- user = get_user_model().objects.get(
- username=user
- )
appl, created = Application.objects.get_or_create(
position=self.position,
applicant=user,
| {"golden_diff": "diff --git a/src/involvement/forms.py b/src/involvement/forms.py\n--- a/src/involvement/forms.py\n+++ b/src/involvement/forms.py\n@@ -3,6 +3,7 @@\n from django.utils.translation import ugettext_lazy as _\n \n from involvement.models import Application, Reference\n+from members.forms import PersonNumberField\n from utils.forms import AdvancedModelMultipleChoiceField\n \n \n@@ -75,9 +76,9 @@\n overturn = forms.CharField(\n required=False,\n label=_('Overturn'),\n- help_text=_('Enter a comma separated list of users you want to '\n- 'appoint to the position, even though did not apply for '\n- 'the position.')\n+ help_text=_('Enter a comma separated list of person numbers you want '\n+ 'to appoint to the position, even though did not apply for'\n+ ' the position.')\n )\n \n def __init__(self, position, *args, **kwargs):\n@@ -96,26 +97,35 @@\n if string == '':\n return []\n else:\n- users = string.split(',')\n- for u in users:\n+ pnrs = string.split(',')\n+ users = []\n+ for pnr in pnrs:\n+ date, number = PersonNumberField().to_python(pnr)\n if not get_user_model().objects.filter(\n- username=u\n+ birthday=date,\n+ person_number_ext=number,\n ).exists():\n raise forms.ValidationError(\n- _('No user with the username %(user)s exists.'),\n- params={'user': u},\n+ _('No user with the person number %(pnr)s exists.'),\n+ params={'pnr': pnr},\n )\n elif self.position.applications.filter(\n- applicant__username=u\n+ applicant__birthday=date,\n+ applicant__person_number_ext=number,\n ).exclude(\n status='draft'\n ).exists():\n raise forms.ValidationError(\n- _('User %(user)s already applied for this position '\n- 'and can not be appointed through the overturn '\n- 'field.'),\n- params={'user': u},\n+ _('User with person number %(pnr)s already applied for'\n+ ' this position and can not be appointed through the'\n+ ' overturn field.'),\n+ params={'pnr': pnr},\n )\n+ else:\n+ users.append(get_user_model().objects.get(\n+ birthday=date,\n+ person_number_ext=number,\n+ ))\n return users\n \n def clean(self):\n@@ -143,9 +153,6 @@\n application.save()\n \n for user in self.cleaned_data['overturn']:\n- user = get_user_model().objects.get(\n- username=user\n- )\n appl, created = Application.objects.get_or_create(\n position=self.position,\n applicant=user,\n", "issue": "Use person number for appoint->overturn field\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Prerequisites\r\n\r\n* [X] Put an X between the brackets on this line if you have done all of the\r\nfollowing:\r\n * Reproduced the problem with clear cache.\r\n * (If running the application locally:) Made sure your running the newest version on the development branch\r\n * Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues\r\n\r\n### Description\r\n\r\nCurrently the `overturn` field within the `appoint` view of `Position` has a overturn field that works with usernames. The username(s) entered will be appointed to the position, no matter if they applied or not.\r\n\r\n_Problem_: Within the old system a similar feature was available, but it was by person number. According to the management team this is preferable.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create new `Position` with past deadline\r\n2. 
Click `appoint` in the overview\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom involvement.models import Application, Reference\nfrom utils.forms import AdvancedModelMultipleChoiceField\n\n\nclass ApplicationForm(forms.ModelForm):\n class Meta:\n model = Application\n exclude = ['position', 'applicant']\n widgets = {\n 'cover_letter': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n 'qualifications': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n }\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['draft', 'submitted'] \\\n or (self.initial['status'] == 'submitted'\n and status == 'draft'):\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n\nReferenceFormSet = forms.inlineformset_factory(\n Application,\n Reference,\n fields=('name', 'position', 'email', 'phone_number', 'comment'),\n widgets={\n 'name': forms.TextInput(attrs={'class': 'form-control'}),\n 'position': forms.TextInput(attrs={'class': 'form-control'}),\n 'email': forms.TextInput(attrs={'class': 'form-control'}),\n 'phone_number': forms.TextInput(attrs={'class': 'form-control'}),\n 'comment': forms.TextInput(attrs={'class': 'form-control'}),\n },\n extra=0,\n)\n\n\nclass ApprovalForm(forms.ModelForm):\n status = forms.ChoiceField(\n choices=(\n ('submitted', '---------'),\n ('approved', _('Approved')),\n ('disapproved', _('Disapproved')),\n ),\n )\n\n class Meta:\n model = Application\n fields = []\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['submitted', 'approved', 'disapproved']:\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n def save(self, commit=True):\n self.instance.status = self.cleaned_data['status']\n\n super(ApprovalForm, self).save(commit)\n\n\nclass AppointmentForm(forms.Form):\n appoint = AdvancedModelMultipleChoiceField(\n Application.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n required=False,\n )\n overturn = forms.CharField(\n required=False,\n label=_('Overturn'),\n help_text=_('Enter a comma separated list of users you want to '\n 'appoint to the position, even though did not apply for '\n 'the position.')\n )\n\n def __init__(self, position, *args, **kwargs):\n super(AppointmentForm, self).__init__(*args, **kwargs)\n self.position = position\n self.fields['appoint'].queryset = position.applications.filter(\n status__in=['submitted', 'approved', 'appointed', 'turned_down']\n )\n self.initial['appoint'] = position.applications.filter(\n status='appointed'\n )\n\n def clean_overturn(self):\n string = self.cleaned_data['overturn']\n string = string.replace(' ', '')\n if string == '':\n return []\n else:\n users = string.split(',')\n for u in users:\n if not get_user_model().objects.filter(\n username=u\n ).exists():\n raise forms.ValidationError(\n _('No user with the username %(user)s exists.'),\n params={'user': u},\n )\n elif self.position.applications.filter(\n applicant__username=u\n ).exclude(\n status='draft'\n ).exists():\n raise forms.ValidationError(\n _('User %(user)s already applied for this position '\n 'and can not be appointed through the overturn '\n 'field.'),\n params={'user': u},\n )\n return users\n\n def clean(self):\n 
super(AppointmentForm, self).clean()\n appoint = self.cleaned_data.get('appoint', [])\n overturn = self.cleaned_data.get('overturn', [])\n nr_appointment = len(appoint) + len(overturn)\n if nr_appointment > self.position.appointments:\n raise forms.ValidationError(\n _('You cannot appoint %(current)s applicants. The maximum '\n 'for this position is %(max)s.'),\n params={\n 'current': nr_appointment,\n 'max': self.position.appointments,\n },\n )\n return self.cleaned_data\n\n def save(self):\n for application in self.fields['appoint'].queryset:\n if application in self.cleaned_data['appoint']:\n application.status = 'appointed'\n else:\n application.status = 'turned_down'\n application.save()\n\n for user in self.cleaned_data['overturn']:\n user = get_user_model().objects.get(\n username=user\n )\n appl, created = Application.objects.get_or_create(\n position=self.position,\n applicant=user,\n defaults={'status': 'appointed'}\n )\n if not created:\n appl.status = 'appointed'\n appl.save()\n", "path": "src/involvement/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom involvement.models import Application, Reference\nfrom members.forms import PersonNumberField\nfrom utils.forms import AdvancedModelMultipleChoiceField\n\n\nclass ApplicationForm(forms.ModelForm):\n class Meta:\n model = Application\n exclude = ['position', 'applicant']\n widgets = {\n 'cover_letter': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n 'qualifications': forms.Textarea(attrs={'style': 'height: 200px',\n 'class': 'form-control'}),\n }\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['draft', 'submitted'] \\\n or (self.initial['status'] == 'submitted'\n and status == 'draft'):\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n\nReferenceFormSet = forms.inlineformset_factory(\n Application,\n Reference,\n fields=('name', 'position', 'email', 'phone_number', 'comment'),\n widgets={\n 'name': forms.TextInput(attrs={'class': 'form-control'}),\n 'position': forms.TextInput(attrs={'class': 'form-control'}),\n 'email': forms.TextInput(attrs={'class': 'form-control'}),\n 'phone_number': forms.TextInput(attrs={'class': 'form-control'}),\n 'comment': forms.TextInput(attrs={'class': 'form-control'}),\n },\n extra=0,\n)\n\n\nclass ApprovalForm(forms.ModelForm):\n status = forms.ChoiceField(\n choices=(\n ('submitted', '---------'),\n ('approved', _('Approved')),\n ('disapproved', _('Disapproved')),\n ),\n )\n\n class Meta:\n model = Application\n fields = []\n\n def clean_status(self):\n status = self.cleaned_data['status']\n if status not in ['submitted', 'approved', 'disapproved']:\n raise forms.ValidationError(_('The submitted status was invalid.'))\n return status\n\n def save(self, commit=True):\n self.instance.status = self.cleaned_data['status']\n\n super(ApprovalForm, self).save(commit)\n\n\nclass AppointmentForm(forms.Form):\n appoint = AdvancedModelMultipleChoiceField(\n Application.objects.none(),\n widget=forms.CheckboxSelectMultiple(),\n required=False,\n )\n overturn = forms.CharField(\n required=False,\n label=_('Overturn'),\n help_text=_('Enter a comma separated list of person numbers you want '\n 'to appoint to the position, even though did not apply for'\n ' the position.')\n )\n\n def __init__(self, position, *args, **kwargs):\n super(AppointmentForm, self).__init__(*args, 
**kwargs)\n self.position = position\n self.fields['appoint'].queryset = position.applications.filter(\n status__in=['submitted', 'approved', 'appointed', 'turned_down']\n )\n self.initial['appoint'] = position.applications.filter(\n status='appointed'\n )\n\n def clean_overturn(self):\n string = self.cleaned_data['overturn']\n string = string.replace(' ', '')\n if string == '':\n return []\n else:\n pnrs = string.split(',')\n users = []\n for pnr in pnrs:\n date, number = PersonNumberField().to_python(pnr)\n if not get_user_model().objects.filter(\n birthday=date,\n person_number_ext=number,\n ).exists():\n raise forms.ValidationError(\n _('No user with the person number %(pnr)s exists.'),\n params={'pnr': pnr},\n )\n elif self.position.applications.filter(\n applicant__birthday=date,\n applicant__person_number_ext=number,\n ).exclude(\n status='draft'\n ).exists():\n raise forms.ValidationError(\n _('User with person number %(pnr)s already applied for'\n ' this position and can not be appointed through the'\n ' overturn field.'),\n params={'pnr': pnr},\n )\n else:\n users.append(get_user_model().objects.get(\n birthday=date,\n person_number_ext=number,\n ))\n return users\n\n def clean(self):\n super(AppointmentForm, self).clean()\n appoint = self.cleaned_data.get('appoint', [])\n overturn = self.cleaned_data.get('overturn', [])\n nr_appointment = len(appoint) + len(overturn)\n if nr_appointment > self.position.appointments:\n raise forms.ValidationError(\n _('You cannot appoint %(current)s applicants. The maximum '\n 'for this position is %(max)s.'),\n params={\n 'current': nr_appointment,\n 'max': self.position.appointments,\n },\n )\n return self.cleaned_data\n\n def save(self):\n for application in self.fields['appoint'].queryset:\n if application in self.cleaned_data['appoint']:\n application.status = 'appointed'\n else:\n application.status = 'turned_down'\n application.save()\n\n for user in self.cleaned_data['overturn']:\n appl, created = Application.objects.get_or_create(\n position=self.position,\n applicant=user,\n defaults={'status': 'appointed'}\n )\n if not created:\n appl.status = 'appointed'\n appl.save()\n", "path": "src/involvement/forms.py"}]} | 1,909 | 610 |
gh_patches_debug_16671 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: FrontEnd routing issues
### Description
The header in the application builds its navigation links incorrectly and needs to be fixed. Because the anchor tags use relative hrefs, following them produces a nested route tree such as /page1/page2/...
### What should have happened?
The rendered output should look like this:
```html
<a class="dark-white-text menu-text" href="/howitworks">How CiviWiki Works</a>
```
instead of
```html
<a class="dark-white-text menu-text" href="howitworks">How CiviWiki Works</a>
```
### What browser(s) are you seeing the problem on?
All
--- END ISSUE ---
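For context, the broken markup above comes from relative hrefs: `href="howitworks"` resolves against the current page (from `/about/` it becomes `/about/howitworks`), while a root-relative `href="/howitworks"` — or Django's `{% url %}` tag against a named pattern — always resolves from the site root.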
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/frontend_views/urls.py`
Content:
```
1 from django.urls import path
2 from frontend_views import views
3
4 urlpatterns = [
5 path("about/", views.about_view, name="about"),
6 path("support_us/", views.support_us_view, name="support us"),
7 path("howitworks/", views.how_it_works_view, name="how it works"),
8 path("profile/<str:username>/", views.user_profile, name="profile"),
9 path("thread/<int:thread_id>/", views.issue_thread, name="issue thread"),
10 path("profile/", views.user_profile, name="default_profile"),
11 path("", views.base_view, name="base"),
12 path("thread/<int:thread_id>/csv/", views.civi2csv, name="civi2csv"),
13 ]
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py
--- a/project/frontend_views/urls.py
+++ b/project/frontend_views/urls.py
@@ -3,10 +3,10 @@
urlpatterns = [
path("about/", views.about_view, name="about"),
- path("support_us/", views.support_us_view, name="support us"),
- path("howitworks/", views.how_it_works_view, name="how it works"),
+ path("support_us/", views.support_us_view, name="support_us"),
+ path("howitworks/", views.how_it_works_view, name="how_it_works"),
path("profile/<str:username>/", views.user_profile, name="profile"),
- path("thread/<int:thread_id>/", views.issue_thread, name="issue thread"),
+ path("thread/<int:thread_id>/", views.issue_thread, name="issue_thread"),
path("profile/", views.user_profile, name="default_profile"),
path("", views.base_view, name="base"),
path("thread/<int:thread_id>/csv/", views.civi2csv, name="civi2csv"),
| {"golden_diff": "diff --git a/project/frontend_views/urls.py b/project/frontend_views/urls.py\n--- a/project/frontend_views/urls.py\n+++ b/project/frontend_views/urls.py\n@@ -3,10 +3,10 @@\n \n urlpatterns = [\n path(\"about/\", views.about_view, name=\"about\"),\n- path(\"support_us/\", views.support_us_view, name=\"support us\"),\n- path(\"howitworks/\", views.how_it_works_view, name=\"how it works\"),\n+ path(\"support_us/\", views.support_us_view, name=\"support_us\"),\n+ path(\"howitworks/\", views.how_it_works_view, name=\"how_it_works\"),\n path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n- path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue thread\"),\n+ path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue_thread\"),\n path(\"profile/\", views.user_profile, name=\"default_profile\"),\n path(\"\", views.base_view, name=\"base\"),\n path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n", "issue": "[BUG]: FrontEnd routing issues\n### Description\r\n\r\nThe header in the application routes the application incorrectly and needs to be fixed. If it routes the anchor tag incorrectly, we'll be having a nested routes tree of /page1/page2/...\r\n\r\n\r\n\r\n### What should have happened?\r\nIts end output must be like this\r\n```html\r\n<a class=\"dark-white-text menu-text\" href=\"/howitworks\">How CiviWiki Works</a>\r\n```\r\ninstead of \r\n```html\r\n<a class=\"dark-white-text menu-text\" href=\"howitworks\">How CiviWiki Works</a>\r\n```\r\n\r\n### What browser(s) are you seeing the problem on?\r\nAll\r\n\n", "before_files": [{"content": "from django.urls import path\nfrom frontend_views import views\n\nurlpatterns = [\n path(\"about/\", views.about_view, name=\"about\"),\n path(\"support_us/\", views.support_us_view, name=\"support us\"),\n path(\"howitworks/\", views.how_it_works_view, name=\"how it works\"),\n path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue thread\"),\n path(\"profile/\", views.user_profile, name=\"default_profile\"),\n path(\"\", views.base_view, name=\"base\"),\n path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n]\n", "path": "project/frontend_views/urls.py"}], "after_files": [{"content": "from django.urls import path\nfrom frontend_views import views\n\nurlpatterns = [\n path(\"about/\", views.about_view, name=\"about\"),\n path(\"support_us/\", views.support_us_view, name=\"support_us\"),\n path(\"howitworks/\", views.how_it_works_view, name=\"how_it_works\"),\n path(\"profile/<str:username>/\", views.user_profile, name=\"profile\"),\n path(\"thread/<int:thread_id>/\", views.issue_thread, name=\"issue_thread\"),\n path(\"profile/\", views.user_profile, name=\"default_profile\"),\n path(\"\", views.base_view, name=\"base\"),\n path(\"thread/<int:thread_id>/csv/\", views.civi2csv, name=\"civi2csv\"),\n]\n", "path": "project/frontend_views/urls.py"}]} | 574 | 254 |
gh_patches_debug_23139 | rasdani/github-patches | git_diff | Pyomo__pyomo-2740 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate the old 'pyomo' command syntax
In earlier releases, we supported commands like
` pyomo --solver=glpk foo.py bar.dat`
And we moved to a nested pyomo command, which uses the following syntax:
` pyomo solve --solver=glpk foo.py bar.dat`
To ease the transition for users, we added a hack that falls back to the old syntax when it looks like the user is not specifying a valid subcommand. However, this leads to confusing error messages, and I think it's time to deprecate that hack and start emitting warnings that it will be removed in an upcoming release.
--- END ISSUE ---
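For what it's worth, Pyomo already ships a deprecation helper in `pyomo.common.deprecation`, so the warning could be emitted roughly as sketched below. The wrapper function, message text, and `version='TBD'` placeholder are illustrative, not the final implementation:

```python
# Rough sketch: warn when an old-style command line is silently rewritten to `pyomo solve`.
from pyomo.common.deprecation import deprecation_warning


def convert_to_solve_subcommand(args):
    """Illustrative helper: rewrite `pyomo --solver=glpk foo.py bar.dat` to the
    nested `pyomo solve ...` form, but warn that this fallback is deprecated."""
    deprecation_warning(
        "Running the 'pyomo' script without a subcommand is deprecated; "
        "defaulting to 'pyomo solve'.",
        version='TBD',  # placeholder for the release that deprecates the fallback
    )
    return ['solve'] + list(args)
```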
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/scripting/pyomo_main.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 import sys
13 import copy
14
15 try:
16 import pkg_resources
17
18 pyomo_commands = pkg_resources.iter_entry_points('pyomo.command')
19 except:
20 pyomo_commands = []
21 #
22 # Load modules associated with Plugins that are defined in
23 # EGG files.
24 #
25 for entrypoint in pyomo_commands:
26 try:
27 plugin_class = entrypoint.load()
28 except Exception:
29 exctype, err, tb = sys.exc_info() # BUG?
30 import traceback
31
32 msg = (
33 "Error loading pyomo.command entry point %s:\nOriginal %s: %s\n"
34 "Traceback:\n%s"
35 % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb)))
36 )
37 # clear local variables to remove circular references
38 exctype = err = tb = None
39 # TODO: Should this just log an error and re-raise the original
40 # exception?
41 raise ImportError(msg)
42
43
44 def main(args=None):
45 #
46 # Load subcommands
47 #
48 from pyomo.scripting import pyomo_parser
49 import pyomo.environ
50
51 #
52 # Parse the arguments
53 #
54 parser = pyomo_parser.get_parser()
55 if args is None:
56 args = copy.copy(sys.argv[1:])
57 #
58 # This is a hack to convert a command-line to a 'solve' subcommand
59 #
60 if not args:
61 args.append('-h')
62 # FIXME: This should use the logger and not print()
63 if args[0][0] == '-':
64 if args[0] not in ['-h', '--help', '--version']:
65 print("WARNING: converting to the 'pyomo solve' subcommand")
66 args = ['solve'] + args[0:]
67 elif args[0] not in pyomo_parser.subparsers:
68 print("WARNING: converting to the 'pyomo solve' subcommand")
69 args = ['solve'] + args[0:]
70 #
71 # Process arguments
72 #
73 _options, _unparsed = parser.parse_known_args(args)
74 #
75 # Process the results
76 #
77 if _options.func.__code__.co_argcount == 1:
78 #
79 # If the execution function only accepts one argument, then we
80 # create an exception if there are unparsed arguments.
81 #
82 if len(_unparsed) > 0:
83 #
84 # Re-parse the command-line to create an exception
85 #
86 parser.parse_args(_unparsed)
87 retval = _options.func(_options)
88 else:
89 retval = _options.func(_options, _unparsed)
90 return retval
91
92
93 def main_console_script():
94 "This is the entry point for the main Pyomo script"
95 # Note that we eat the retval data structure and only return the
96 # process return code
97 ans = main()
98 try:
99 return ans.errorcode
100 except AttributeError:
101 return ans
102
103
104 if __name__ == '__main__':
105 sys.exit(main_console_script())
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py
--- a/pyomo/scripting/pyomo_main.py
+++ b/pyomo/scripting/pyomo_main.py
@@ -11,6 +11,7 @@
import sys
import copy
+from pyomo.common.deprecation import deprecation_warning
try:
import pkg_resources
@@ -59,13 +60,16 @@
#
if not args:
args.append('-h')
- # FIXME: This should use the logger and not print()
if args[0][0] == '-':
if args[0] not in ['-h', '--help', '--version']:
- print("WARNING: converting to the 'pyomo solve' subcommand")
+ deprecation_warning("Running the 'pyomo' script with no subcommand is deprecated. "
+ "Defaulting to 'pyomo solve'",
+ version='TBD')
args = ['solve'] + args[0:]
elif args[0] not in pyomo_parser.subparsers:
- print("WARNING: converting to the 'pyomo solve' subcommand")
+ deprecation_warning("Running the 'pyomo' script with no subcommand is deprecated. "
+ "Defaulting to 'pyomo solve'",
+ version='TBD')
args = ['solve'] + args[0:]
#
# Process arguments
| {"golden_diff": "diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py\n--- a/pyomo/scripting/pyomo_main.py\n+++ b/pyomo/scripting/pyomo_main.py\n@@ -11,6 +11,7 @@\n \n import sys\n import copy\n+from pyomo.common.deprecation import deprecation_warning\n \n try:\n import pkg_resources\n@@ -59,13 +60,16 @@\n #\n if not args:\n args.append('-h')\n- # FIXME: This should use the logger and not print()\n if args[0][0] == '-':\n if args[0] not in ['-h', '--help', '--version']:\n- print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n+ deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n+ \"Defaulting to 'pyomo solve'\",\n+ version='TBD')\n args = ['solve'] + args[0:]\n elif args[0] not in pyomo_parser.subparsers:\n- print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n+ deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n+ \"Defaulting to 'pyomo solve'\",\n+ version='TBD')\n args = ['solve'] + args[0:]\n #\n # Process arguments\n", "issue": "Deprecate the old 'pyomo' command syntax\nIn earlier releases, we supported commands like\r\n` pyomo --solver=glpk foo.py bar.dat`\r\nAnd we moved to a nested pyomo command, which uses the following syntax:\r\n` pyomo solve --solver=glpk foo.py bar.dat`\r\n\r\nHowever, to enable graceful transition for users, we added a hack to transition to the old syntax if it looks like the user is not specifying a valid subcommand. However, this leads to confusing error messages, and I think it's time to deprecate that hack and start throwing warnings that this hack will be removed in an upcoming release.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys\nimport copy\n\ntry:\n import pkg_resources\n\n pyomo_commands = pkg_resources.iter_entry_points('pyomo.command')\nexcept:\n pyomo_commands = []\n#\n# Load modules associated with Plugins that are defined in\n# EGG files.\n#\nfor entrypoint in pyomo_commands:\n try:\n plugin_class = entrypoint.load()\n except Exception:\n exctype, err, tb = sys.exc_info() # BUG?\n import traceback\n\n msg = (\n \"Error loading pyomo.command entry point %s:\\nOriginal %s: %s\\n\"\n \"Traceback:\\n%s\"\n % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb)))\n )\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the original\n # exception?\n raise ImportError(msg)\n\n\ndef main(args=None):\n #\n # Load subcommands\n #\n from pyomo.scripting import pyomo_parser\n import pyomo.environ\n\n #\n # Parse the arguments\n #\n parser = pyomo_parser.get_parser()\n if args is None:\n args = copy.copy(sys.argv[1:])\n #\n # This is a hack to convert a command-line to a 'solve' subcommand\n #\n if not args:\n args.append('-h')\n # FIXME: This should use the logger and not print()\n if args[0][0] == '-':\n if args[0] not in ['-h', '--help', '--version']:\n print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n args = ['solve'] + args[0:]\n elif args[0] not in pyomo_parser.subparsers:\n print(\"WARNING: converting to the 'pyomo solve' subcommand\")\n args = ['solve'] + args[0:]\n #\n # Process arguments\n #\n _options, _unparsed = parser.parse_known_args(args)\n #\n # Process the results\n #\n if _options.func.__code__.co_argcount == 1:\n #\n # If the execution function only accepts one argument, then we\n # create an exception if there are unparsed arguments.\n #\n if len(_unparsed) > 0:\n #\n # Re-parse the command-line to create an exception\n #\n parser.parse_args(_unparsed)\n retval = _options.func(_options)\n else:\n retval = _options.func(_options, _unparsed)\n return retval\n\n\ndef main_console_script():\n \"This is the entry point for the main Pyomo script\"\n # Note that we eat the retval data structure and only return the\n # process return code\n ans = main()\n try:\n return ans.errorcode\n except AttributeError:\n return ans\n\n\nif __name__ == '__main__':\n sys.exit(main_console_script())\n", "path": "pyomo/scripting/pyomo_main.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys\nimport copy\nfrom pyomo.common.deprecation import deprecation_warning\n\ntry:\n import pkg_resources\n\n pyomo_commands = pkg_resources.iter_entry_points('pyomo.command')\nexcept:\n pyomo_commands = []\n#\n# Load modules associated with Plugins that are defined in\n# EGG files.\n#\nfor entrypoint in pyomo_commands:\n try:\n plugin_class = entrypoint.load()\n except Exception:\n exctype, err, tb = sys.exc_info() # BUG?\n import traceback\n\n msg = (\n \"Error loading pyomo.command entry point %s:\\nOriginal %s: %s\\n\"\n \"Traceback:\\n%s\"\n % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb)))\n )\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the original\n # exception?\n raise ImportError(msg)\n\n\ndef main(args=None):\n #\n # Load subcommands\n #\n from pyomo.scripting import pyomo_parser\n import pyomo.environ\n\n #\n # Parse the arguments\n #\n parser = pyomo_parser.get_parser()\n if args is None:\n args = copy.copy(sys.argv[1:])\n #\n # This is a hack to convert a command-line to a 'solve' subcommand\n #\n if not args:\n args.append('-h')\n if args[0][0] == '-':\n if args[0] not in ['-h', '--help', '--version']:\n deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n \"Defaulting to 'pyomo solve'\",\n version='TBD')\n args = ['solve'] + args[0:]\n elif args[0] not in pyomo_parser.subparsers:\n deprecation_warning(\"Running the 'pyomo' script with no subcommand is deprecated. \"\n \"Defaulting to 'pyomo solve'\",\n version='TBD')\n args = ['solve'] + args[0:]\n #\n # Process arguments\n #\n _options, _unparsed = parser.parse_known_args(args)\n #\n # Process the results\n #\n if _options.func.__code__.co_argcount == 1:\n #\n # If the execution function only accepts one argument, then we\n # create an exception if there are unparsed arguments.\n #\n if len(_unparsed) > 0:\n #\n # Re-parse the command-line to create an exception\n #\n parser.parse_args(_unparsed)\n retval = _options.func(_options)\n else:\n retval = _options.func(_options, _unparsed)\n return retval\n\n\ndef main_console_script():\n \"This is the entry point for the main Pyomo script\"\n # Note that we eat the retval data structure and only return the\n # process return code\n ans = main()\n try:\n return ans.errorcode\n except AttributeError:\n return ans\n\n\nif __name__ == '__main__':\n sys.exit(main_console_script())\n", "path": "pyomo/scripting/pyomo_main.py"}]} | 1,363 | 306 |
gh_patches_debug_13095 | rasdani/github-patches | git_diff | huggingface__accelerate-1960 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
patch_environment does not revert previously existing keys
### System Info
```Shell
does not matter here
```
### Information
- [ ] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [ ] My own task or dataset (give details below)
### Reproduction
```python
os.environ["FOO"] = "BAR"
with patch_environment(FOO="BAZ"):
assert os.environ["FOO"] == "BAZ"
assert os.environ["F00"] == "BAR" # KeyError
```
### Expected behavior
After leaving the context, the initial environment variables should be restored.
I noticed this while going through the code. If this is an issue, let me know and I'll provide a fix plus tests. If it's not a problem in the contexts where this function is used, feel free to close the issue.
--- END ISSUE ---
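One straightforward way to get the expected behaviour is to remember any variables the context manager overwrites and put them back on exit. The sketch below mirrors the semantics described above rather than a confirmed upstream fix (the try/finally is an extra safety net, not required by the report):

```python
import os
from contextlib import contextmanager


@contextmanager
def patch_environment(**kwargs):
    """Sketch: set upper-cased environment variables for the duration of the
    context and restore any values that existed beforehand."""
    previous = {}
    for key, value in kwargs.items():
        key = key.upper()
        if key in os.environ:
            previous[key] = os.environ[key]  # remember the value we overwrite
        os.environ[key] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            key = key.upper()
            if key in previous:
                os.environ[key] = previous[key]  # restore the original value
            else:
                os.environ.pop(key, None)  # variable did not exist before the context
```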
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/utils/other.py`
Content:
```
1 # Copyright 2022 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import socket
17 from contextlib import contextmanager
18 from types import MethodType
19
20 import torch
21
22 from ..commands.config.default import write_basic_config # noqa: F401
23 from ..state import PartialState
24 from .constants import FSDP_PYTORCH_VERSION
25 from .dataclasses import DistributedType
26 from .imports import is_deepspeed_available, is_tpu_available
27 from .transformer_engine import convert_model
28 from .versions import is_torch_version
29
30
31 if is_deepspeed_available():
32 from deepspeed import DeepSpeedEngine
33
34 if is_tpu_available(check_device=False):
35 import torch_xla.core.xla_model as xm
36
37
38 def is_compiled_module(module):
39 """
40 Check whether the module was compiled with torch.compile()
41 """
42 if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
43 return False
44 return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
45
46
47 def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
48 """
49 Extract a model from its distributed containers.
50
51 Args:
52 model (`torch.nn.Module`):
53 The model to extract.
54 keep_fp32_wrapper (`bool`, *optional*):
55 Whether to remove mixed precision hooks from the model.
56
57 Returns:
58 `torch.nn.Module`: The extracted model.
59 """
60 options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
61
62 is_compiled = is_compiled_module(model)
63 if is_compiled:
64 compiled_model = model
65 model = model._orig_mod
66
67 if is_deepspeed_available():
68 options += (DeepSpeedEngine,)
69
70 if is_torch_version(">=", FSDP_PYTORCH_VERSION):
71 from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
72
73 options += (FSDP,)
74
75 while isinstance(model, options):
76 model = model.module
77
78 if not keep_fp32_wrapper:
79 forward = getattr(model, "forward")
80 original_forward = model.__dict__.pop("_original_forward", None)
81 if original_forward is not None:
82 while hasattr(forward, "__wrapped__"):
83 forward = forward.__wrapped__
84 if forward == original_forward:
85 break
86 model.forward = MethodType(forward, model)
87 if getattr(model, "_converted_to_transformer_engine", False):
88 convert_model(model, to_transformer_engine=False)
89
90 if is_compiled:
91 compiled_model._orig_mod = model
92 model = compiled_model
93
94 return model
95
96
97 def wait_for_everyone():
98 """
99 Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
100
101 <Tip warning={true}>
102
103 Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
104
105 </Tip>
106 """
107 PartialState().wait_for_everyone()
108
109
110 def save(obj, f):
111 """
112 Save the data to disk. Use in place of `torch.save()`.
113
114 Args:
115 obj: The data to save
116 f: The file (or file-like object) to use to save the data
117 """
118 if PartialState().distributed_type == DistributedType.TPU:
119 xm.save(obj, f)
120 elif PartialState().local_process_index == 0:
121 torch.save(obj, f)
122
123
124 @contextmanager
125 def clear_environment():
126 """
127 A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context.
128
129 When this context exits, the cached `os.environ` will be back.
130
131 Example:
132
133 ```python
134 >>> import os
135 >>> from accelerate.utils import clear_environment
136
137 >>> os.environ["FOO"] = "bar"
138 >>> with clear_environment():
139 ... print(os.environ)
140 ... os.environ["FOO"] = "new_bar"
141 ... print(os.environ["FOO"])
142 {}
143 new_bar
144
145 >>> print(os.environ["FOO"])
146 bar
147 ```
148 """
149 _old_os_environ = os.environ
150 os.environ = dict()
151
152 yield
153
154 os.environ = _old_os_environ
155
156
157 @contextmanager
158 def patch_environment(**kwargs):
159 """
160 A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.
161
162 Will convert the values in `kwargs` to strings and upper-case all the keys.
163
164 Example:
165
166 ```python
167 >>> import os
168 >>> from accelerate.utils import patch_environment
169
170 >>> with patch_environment(FOO="bar"):
171 ... print(os.environ["FOO"]) # prints "bar"
172 >>> print(os.environ["FOO"]) # raises KeyError
173 ```
174 """
175 for key, value in kwargs.items():
176 os.environ[key.upper()] = str(value)
177
178 yield
179
180 for key in kwargs:
181 if key.upper() in os.environ:
182 del os.environ[key.upper()]
183
184
185 def get_pretty_name(obj):
186 """
187 Gets a pretty name from `obj`.
188 """
189 if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
190 obj = getattr(obj, "__class__", obj)
191 if hasattr(obj, "__qualname__"):
192 return obj.__qualname__
193 if hasattr(obj, "__name__"):
194 return obj.__name__
195 return str(obj)
196
197
198 def merge_dicts(source, destination):
199 """
200 Recursively merges two dictionaries.
201
202 Args:
203 source (`dict`): The dictionary to merge into `destination`.
204 destination (`dict`): The dictionary to merge `source` into.
205 """
206 for key, value in source.items():
207 if isinstance(value, dict):
208 node = destination.setdefault(key, {})
209 merge_dicts(value, node)
210 else:
211 destination[key] = value
212
213 return destination
214
215
216 def is_port_in_use(port: int = None) -> bool:
217 """
218 Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been
219 run and need to see if the port is already in use.
220 """
221 if port is None:
222 port = 29500
223 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
224 return s.connect_ex(("localhost", port)) == 0
225
226
227 def convert_bytes(size):
228 "Converts `size` from bytes to the largest possible unit"
229 for x in ["bytes", "KB", "MB", "GB", "TB"]:
230 if size < 1024.0:
231 return f"{round(size, 2)} {x}"
232 size /= 1024.0
233
234 return f"{round(size, 2)} PB"
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py
--- a/src/accelerate/utils/other.py
+++ b/src/accelerate/utils/other.py
@@ -172,14 +172,22 @@
>>> print(os.environ["FOO"]) # raises KeyError
```
"""
+ existing_vars = {}
for key, value in kwargs.items():
- os.environ[key.upper()] = str(value)
+ key = key.upper()
+ if key in os.environ:
+ existing_vars[key] = os.environ[key]
+ os.environ[key] = str(value)
yield
for key in kwargs:
- if key.upper() in os.environ:
- del os.environ[key.upper()]
+ key = key.upper()
+ if key in existing_vars:
+ # restore previous value
+ os.environ[key] = existing_vars[key]
+ else:
+ os.environ.pop(key, None)
def get_pretty_name(obj):
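With a change along these lines, the reproduction from the issue passes (sketch of the assertions, with the report's `F00` typo corrected to `FOO`):

```python
import os
from accelerate.utils import patch_environment

os.environ["FOO"] = "BAR"
with patch_environment(FOO="BAZ"):
    assert os.environ["FOO"] == "BAZ"
assert os.environ["FOO"] == "BAR"  # previously raised KeyError
```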
| {"golden_diff": "diff --git a/src/accelerate/utils/other.py b/src/accelerate/utils/other.py\n--- a/src/accelerate/utils/other.py\n+++ b/src/accelerate/utils/other.py\n@@ -172,14 +172,22 @@\n >>> print(os.environ[\"FOO\"]) # raises KeyError\n ```\n \"\"\"\n+ existing_vars = {}\n for key, value in kwargs.items():\n- os.environ[key.upper()] = str(value)\n+ key = key.upper()\n+ if key in os.environ:\n+ existing_vars[key] = os.environ[key]\n+ os.environ[key] = str(value)\n \n yield\n \n for key in kwargs:\n- if key.upper() in os.environ:\n- del os.environ[key.upper()]\n+ key = key.upper()\n+ if key in existing_vars:\n+ # restore previous value\n+ os.environ[key] = existing_vars[key]\n+ else:\n+ os.environ.pop(key, None)\n \n \n def get_pretty_name(obj):\n", "issue": "patch_environment does not revert previously existing keys\n### System Info\n\n```Shell\ndoes not matter here\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [ ] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [ ] My own task or dataset (give details below)\n\n### Reproduction\n\n```python\r\nos.environ[\"FOO\"] = \"BAR\"\r\nwith patch_environment(FOO=\"BAZ\"):\r\n assert os.environ[\"FOO\"] == \"BAZ\"\r\nassert os.environ[\"F00\"] == \"BAR\" # KeyError\r\n```\n\n### Expected behavior\n\nAfter leaving the context, the initial environment variables should be restored.\r\n\r\nI noticed this while going through the code. If this is an issue, LMK and I'll provide a fix + tests. If, in the context that this function is used, it's not a problem, feel free to close the issue.\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport socket\nfrom contextlib import contextmanager\nfrom types import MethodType\n\nimport torch\n\nfrom ..commands.config.default import write_basic_config # noqa: F401\nfrom ..state import PartialState\nfrom .constants import FSDP_PYTORCH_VERSION\nfrom .dataclasses import DistributedType\nfrom .imports import is_deepspeed_available, is_tpu_available\nfrom .transformer_engine import convert_model\nfrom .versions import is_torch_version\n\n\nif is_deepspeed_available():\n from deepspeed import DeepSpeedEngine\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef is_compiled_module(module):\n \"\"\"\n Check whether the module was compiled with torch.compile()\n \"\"\"\n if is_torch_version(\"<\", \"2.0.0\") or not hasattr(torch, \"_dynamo\"):\n return False\n return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)\n\n\ndef extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (`torch.nn.Module`):\n The model to extract.\n keep_fp32_wrapper (`bool`, *optional*):\n Whether to remove mixed precision hooks from the model.\n\n Returns:\n `torch.nn.Module`: The extracted model.\n \"\"\"\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n\n is_compiled = is_compiled_module(model)\n if is_compiled:\n compiled_model = model\n model = model._orig_mod\n\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n if is_torch_version(\">=\", FSDP_PYTORCH_VERSION):\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n\n options += (FSDP,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = MethodType(forward, model)\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n\n if is_compiled:\n compiled_model._orig_mod = model\n model = compiled_model\n\n return model\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n <Tip warning={true}>\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n\n </Tip>\n \"\"\"\n PartialState().wait_for_everyone()\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. 
Use in place of `torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if PartialState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif PartialState().local_process_index == 0:\n torch.save(obj, f)\n\n\n@contextmanager\ndef clear_environment():\n \"\"\"\n A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context.\n\n When this context exits, the cached `os.environ` will be back.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import clear_environment\n\n >>> os.environ[\"FOO\"] = \"bar\"\n >>> with clear_environment():\n ... print(os.environ)\n ... os.environ[\"FOO\"] = \"new_bar\"\n ... print(os.environ[\"FOO\"])\n {}\n new_bar\n\n >>> print(os.environ[\"FOO\"])\n bar\n ```\n \"\"\"\n _old_os_environ = os.environ\n os.environ = dict()\n\n yield\n\n os.environ = _old_os_environ\n\n\n@contextmanager\ndef patch_environment(**kwargs):\n \"\"\"\n A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n\n Will convert the values in `kwargs` to strings and upper-case all the keys.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import patch_environment\n\n >>> with patch_environment(FOO=\"bar\"):\n ... print(os.environ[\"FOO\"]) # prints \"bar\"\n >>> print(os.environ[\"FOO\"]) # raises KeyError\n ```\n \"\"\"\n for key, value in kwargs.items():\n os.environ[key.upper()] = str(value)\n\n yield\n\n for key in kwargs:\n if key.upper() in os.environ:\n del os.environ[key.upper()]\n\n\ndef get_pretty_name(obj):\n \"\"\"\n Gets a pretty name from `obj`.\n \"\"\"\n if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n obj = getattr(obj, \"__class__\", obj)\n if hasattr(obj, \"__qualname__\"):\n return obj.__qualname__\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n\n\ndef merge_dicts(source, destination):\n \"\"\"\n Recursively merges two dictionaries.\n\n Args:\n source (`dict`): The dictionary to merge into `destination`.\n destination (`dict`): The dictionary to merge `source` into.\n \"\"\"\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n\n return destination\n\n\ndef is_port_in_use(port: int = None) -> bool:\n \"\"\"\n Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been\n run and need to see if the port is already in use.\n \"\"\"\n if port is None:\n port = 29500\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex((\"localhost\", port)) == 0\n\n\ndef convert_bytes(size):\n \"Converts `size` from bytes to the largest possible unit\"\n for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if size < 1024.0:\n return f\"{round(size, 2)} {x}\"\n size /= 1024.0\n\n return f\"{round(size, 2)} PB\"\n", "path": "src/accelerate/utils/other.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport socket\nfrom contextlib import contextmanager\nfrom types import MethodType\n\nimport torch\n\nfrom ..commands.config.default import write_basic_config # noqa: F401\nfrom ..state import PartialState\nfrom .constants import FSDP_PYTORCH_VERSION\nfrom .dataclasses import DistributedType\nfrom .imports import is_deepspeed_available, is_tpu_available\nfrom .transformer_engine import convert_model\nfrom .versions import is_torch_version\n\n\nif is_deepspeed_available():\n from deepspeed import DeepSpeedEngine\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef is_compiled_module(module):\n \"\"\"\n Check whether the module was compiled with torch.compile()\n \"\"\"\n if is_torch_version(\"<\", \"2.0.0\") or not hasattr(torch, \"_dynamo\"):\n return False\n return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)\n\n\ndef extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (`torch.nn.Module`):\n The model to extract.\n keep_fp32_wrapper (`bool`, *optional*):\n Whether to remove mixed precision hooks from the model.\n\n Returns:\n `torch.nn.Module`: The extracted model.\n \"\"\"\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n\n is_compiled = is_compiled_module(model)\n if is_compiled:\n compiled_model = model\n model = model._orig_mod\n\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n if is_torch_version(\">=\", FSDP_PYTORCH_VERSION):\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n\n options += (FSDP,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = MethodType(forward, model)\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n\n if is_compiled:\n compiled_model._orig_mod = model\n model = compiled_model\n\n return model\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n <Tip warning={true}>\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n\n </Tip>\n \"\"\"\n PartialState().wait_for_everyone()\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. 
Use in place of `torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if PartialState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif PartialState().local_process_index == 0:\n torch.save(obj, f)\n\n\n@contextmanager\ndef clear_environment():\n \"\"\"\n A context manager that will cache origin `os.environ` and replace it with a empty dictionary in this context.\n\n When this context exits, the cached `os.environ` will be back.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import clear_environment\n\n >>> os.environ[\"FOO\"] = \"bar\"\n >>> with clear_environment():\n ... print(os.environ)\n ... os.environ[\"FOO\"] = \"new_bar\"\n ... print(os.environ[\"FOO\"])\n {}\n new_bar\n\n >>> print(os.environ[\"FOO\"])\n bar\n ```\n \"\"\"\n _old_os_environ = os.environ\n os.environ = dict()\n\n yield\n\n os.environ = _old_os_environ\n\n\n@contextmanager\ndef patch_environment(**kwargs):\n \"\"\"\n A context manager that will add each keyword argument passed to `os.environ` and remove them when exiting.\n\n Will convert the values in `kwargs` to strings and upper-case all the keys.\n\n Example:\n\n ```python\n >>> import os\n >>> from accelerate.utils import patch_environment\n\n >>> with patch_environment(FOO=\"bar\"):\n ... print(os.environ[\"FOO\"]) # prints \"bar\"\n >>> print(os.environ[\"FOO\"]) # raises KeyError\n ```\n \"\"\"\n existing_vars = {}\n for key, value in kwargs.items():\n key = key.upper()\n if key in os.environ:\n existing_vars[key] = os.environ[key]\n os.environ[key] = str(value)\n\n yield\n\n for key in kwargs:\n key = key.upper()\n if key in existing_vars:\n # restore previous value\n os.environ[key] = existing_vars[key]\n else:\n os.environ.pop(key, None)\n\n\ndef get_pretty_name(obj):\n \"\"\"\n Gets a pretty name from `obj`.\n \"\"\"\n if not hasattr(obj, \"__qualname__\") and not hasattr(obj, \"__name__\"):\n obj = getattr(obj, \"__class__\", obj)\n if hasattr(obj, \"__qualname__\"):\n return obj.__qualname__\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n return str(obj)\n\n\ndef merge_dicts(source, destination):\n \"\"\"\n Recursively merges two dictionaries.\n\n Args:\n source (`dict`): The dictionary to merge into `destination`.\n destination (`dict`): The dictionary to merge `source` into.\n \"\"\"\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n\n return destination\n\n\ndef is_port_in_use(port: int = None) -> bool:\n \"\"\"\n Checks if a port is in use on `localhost`. Useful for checking if multiple `accelerate launch` commands have been\n run and need to see if the port is already in use.\n \"\"\"\n if port is None:\n port = 29500\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex((\"localhost\", port)) == 0\n\n\ndef convert_bytes(size):\n \"Converts `size` from bytes to the largest possible unit\"\n for x in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if size < 1024.0:\n return f\"{round(size, 2)} {x}\"\n size /= 1024.0\n\n return f\"{round(size, 2)} PB\"\n", "path": "src/accelerate/utils/other.py"}]} | 2,696 | 222 |
gh_patches_debug_27643 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-7218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Substack] extractor fails ("Unable to extract preloads")
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
global
### Provide a description that is worded well enough to be understood
Ran yt-dlp on a rather ordinary Substack post, and received an error message along with a request to report the issue.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['--restrict-filenames', '-o', '%(title)s-%(id)s-%(uploader)s.%(ext)s', '-w', '-v', 'https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [392389b7d]
[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-5.15.88-with-glibc2.36 (OpenSSL 1.1.1t 7 Feb 2023, glibc 2.36)
[debug] exe versions: ffmpeg 4.4.3 (setts), ffprobe 4.4.3
[debug] Optional libraries: certifi-3021.03.16, pycrypto-3.17
[debug] Proxy map: {}
[debug] Loaded 1786 extractors
[Substack] Extracting URL: https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients
[Substack] how-big-pharma-calculates-a-patients: Downloading webpage
ERROR: [Substack] how-big-pharma-calculates-a-patients: Unable to extract preloads; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 694, in extract
ie_result = self._real_extract(url)
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/substack.py", line 80, in _real_extract
webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 1256, in _search_json
json_string = self._search_regex(
^^^^^^^^^^^^^^^^^^^
File "/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py", line 1242, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
```
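
The failing regex expects a bare object after `window._preloads =`, but newer Substack pages appear to wrap the payload in `JSON.parse("…")`. A minimal sketch of a more tolerant extraction — essentially the direction the patch further down in this record takes (`js_to_json` being the helper from `yt_dlp.utils`):

```python
# Sketch only: read the quoted-JSON form of window._preloads and decode it twice
# (once for the JS string literal, once for the JSON it contains).
json_string = self._search_json(
    r'window\._preloads\s*=\s*JSON\.parse\(', webpage, 'json string',
    display_id, transform_source=js_to_json, contains_pattern=r'"{(?s:.+)}"')
webpage_info = self._parse_json(json_string, display_id)
```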
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/substack.py`
Content:
```
1 import re
2 import urllib.parse
3
4 from .common import InfoExtractor
5 from ..utils import str_or_none, traverse_obj
6
7
8 class SubstackIE(InfoExtractor):
9 _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)'
10 _TESTS = [{
11 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',
12 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',
13 'info_dict': {
14 'id': '47660949',
15 'ext': 'mp4',
16 'title': 'I MADE A VLOG',
17 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',
18 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',
19 'uploader': 'Maybe Baby',
20 'uploader_id': '33628',
21 }
22 }, {
23 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',
24 'md5': '0a63eacec877a1171a62cfa69710fcea',
25 'info_dict': {
26 'id': '51045592',
27 'ext': 'mpga',
28 'title': "🎧 Dear Danny: I found my boyfriend's secret Twitter account",
29 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',
30 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',
31 'uploader': 'Maybe Baby',
32 'uploader_id': '33628',
33 }
34 }, {
35 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',
36 'md5': 'fd3c07077b02444ff0130715b5f632bb',
37 'info_dict': {
38 'id': '47368578',
39 'ext': 'mp4',
40 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',
41 'description': 'md5:b96234a2906c7d854d5229818d889515',
42 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',
43 'uploader': "Andrew Zimmern's Spilled Milk ",
44 'uploader_id': '577659',
45 }
46 }]
47
48 @classmethod
49 def _extract_embed_urls(cls, url, webpage):
50 if not re.search(r'<script[^>]+src=["\']https://substackcdn.com/[^"\']+\.js', webpage):
51 return
52
53 mobj = re.search(r'{[^}]*["\']subdomain["\']\s*:\s*["\'](?P<subdomain>[^"]+)', webpage)
54 if mobj:
55 parsed = urllib.parse.urlparse(url)
56 yield parsed._replace(netloc=f'{mobj.group("subdomain")}.substack.com').geturl()
57 raise cls.StopExtraction()
58
59 def _extract_video_formats(self, video_id, username):
60 formats, subtitles = [], {}
61 for video_format in ('hls', 'mp4'):
62 video_url = f'https://{username}.substack.com/api/v1/video/upload/{video_id}/src?type={video_format}'
63
64 if video_format == 'hls':
65 fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)
66 formats.extend(fmts)
67 self._merge_subtitles(subs, target=subtitles)
68 else:
69 formats.append({
70 'url': video_url,
71 'ext': video_format,
72 })
73
74 return formats, subtitles
75
76 def _real_extract(self, url):
77 display_id, username = self._match_valid_url(url).group('id', 'username')
78 webpage = self._download_webpage(url, display_id)
79
80 webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
81
82 post_type = webpage_info['post']['type']
83 formats, subtitles = [], {}
84 if post_type == 'podcast':
85 formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}
86 elif post_type == 'video':
87 formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], username)
88 else:
89 self.raise_no_formats(f'Page type "{post_type}" is not supported')
90
91 return {
92 'id': str(webpage_info['post']['id']),
93 'formats': formats,
94 'subtitles': subtitles,
95 'title': traverse_obj(webpage_info, ('post', 'title')),
96 'description': traverse_obj(webpage_info, ('post', 'description')),
97 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),
98 'uploader': traverse_obj(webpage_info, ('pub', 'name')),
99 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),
100 }
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/substack.py b/yt_dlp/extractor/substack.py
--- a/yt_dlp/extractor/substack.py
+++ b/yt_dlp/extractor/substack.py
@@ -2,7 +2,7 @@
import urllib.parse
from .common import InfoExtractor
-from ..utils import str_or_none, traverse_obj
+from ..utils import js_to_json, str_or_none, traverse_obj
class SubstackIE(InfoExtractor):
@@ -14,7 +14,7 @@
'id': '47660949',
'ext': 'mp4',
'title': 'I MADE A VLOG',
- 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',
+ 'description': 'md5:9248af9a759321e1027226f988f54d96',
'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',
'uploader': 'Maybe Baby',
'uploader_id': '33628',
@@ -77,7 +77,9 @@
display_id, username = self._match_valid_url(url).group('id', 'username')
webpage = self._download_webpage(url, display_id)
- webpage_info = self._search_json(r'<script[^>]*>\s*window\._preloads\s*=', webpage, 'preloads', display_id)
+ webpage_info = self._parse_json(self._search_json(
+ r'window\._preloads\s*=\s*JSON\.parse\(', webpage, 'json string',
+ display_id, transform_source=js_to_json, contains_pattern=r'"{(?s:.+)}"'), display_id)
post_type = webpage_info['post']['type']
formats, subtitles = [], {}
| {"golden_diff": "diff --git a/yt_dlp/extractor/substack.py b/yt_dlp/extractor/substack.py\n--- a/yt_dlp/extractor/substack.py\n+++ b/yt_dlp/extractor/substack.py\n@@ -2,7 +2,7 @@\n import urllib.parse\n \n from .common import InfoExtractor\n-from ..utils import str_or_none, traverse_obj\n+from ..utils import js_to_json, str_or_none, traverse_obj\n \n \n class SubstackIE(InfoExtractor):\n@@ -14,7 +14,7 @@\n 'id': '47660949',\n 'ext': 'mp4',\n 'title': 'I MADE A VLOG',\n- 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',\n+ 'description': 'md5:9248af9a759321e1027226f988f54d96',\n 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n@@ -77,7 +77,9 @@\n display_id, username = self._match_valid_url(url).group('id', 'username')\n webpage = self._download_webpage(url, display_id)\n \n- webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\n+ webpage_info = self._parse_json(self._search_json(\n+ r'window\\._preloads\\s*=\\s*JSON\\.parse\\(', webpage, 'json string',\n+ display_id, transform_source=js_to_json, contains_pattern=r'\"{(?s:.+)}\"'), display_id)\n \n post_type = webpage_info['post']['type']\n formats, subtitles = [], {}\n", "issue": "[Substack] extractor fails (\"Unable to extract preloads\")\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I'm running yt-dlp version **2023.03.04** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nglobal\n\n### Provide a description that is worded well enough to be understood\n\nRan yt-dlp on a rather ordinary Substack post, and received an error message along with a request to report the issue.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['--restrict-filenames', '-o', '%(title)s-%(id)s-%(uploader)s.%(ext)s', '-w', '-v', 'https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [392389b7d]\r\n[debug] Python 3.11.3 (CPython x86_64 64bit) - Linux-5.15.88-with-glibc2.36 (OpenSSL 1.1.1t 7 Feb 2023, glibc 2.36)\r\n[debug] exe versions: ffmpeg 4.4.3 (setts), ffprobe 4.4.3\r\n[debug] Optional libraries: certifi-3021.03.16, pycrypto-3.17\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1786 extractors\r\n[Substack] Extracting URL: https://pharmafiles.substack.com/p/how-big-pharma-calculates-a-patients\r\n[Substack] how-big-pharma-calculates-a-patients: Downloading webpage\r\nERROR: [Substack] how-big-pharma-calculates-a-patients: Unable to extract preloads; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 694, in extract\r\n ie_result = self._real_extract(url)\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/substack.py\", line 80, in _real_extract\r\n webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 1256, in _search_json\r\n json_string = self._search_regex(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/lib/python3.11/site-packages/yt_dlp/extractor/common.py\", line 1242, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\n```\n\n", "before_files": [{"content": "import re\nimport urllib.parse\n\nfrom .common import InfoExtractor\nfrom ..utils import str_or_none, traverse_obj\n\n\nclass SubstackIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<username>[\\w-]+)\\.substack\\.com/p/(?P<id>[\\w-]+)'\n _TESTS = [{\n 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',\n 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',\n 'info_dict': {\n 'id': '47660949',\n 'ext': 'mp4',\n 'title': 'I MADE A VLOG',\n 'description': 'md5:10c01ff93439a62e70ce963b2aa0b7f6',\n 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',\n 'md5': '0a63eacec877a1171a62cfa69710fcea',\n 'info_dict': {\n 'id': '51045592',\n 'ext': 'mpga',\n 'title': \"\ud83c\udfa7 Dear Danny: I found my boyfriend's secret Twitter account\",\n 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',\n 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',\n 'md5': 'fd3c07077b02444ff0130715b5f632bb',\n 'info_dict': {\n 'id': '47368578',\n 'ext': 'mp4',\n 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',\n 'description': 'md5:b96234a2906c7d854d5229818d889515',\n 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',\n 'uploader': \"Andrew Zimmern's Spilled Milk \",\n 'uploader_id': '577659',\n }\n }]\n\n @classmethod\n def _extract_embed_urls(cls, url, webpage):\n if not re.search(r'<script[^>]+src=[\"\\']https://substackcdn.com/[^\"\\']+\\.js', webpage):\n return\n\n mobj = re.search(r'{[^}]*[\"\\']subdomain[\"\\']\\s*:\\s*[\"\\'](?P<subdomain>[^\"]+)', webpage)\n if mobj:\n parsed = urllib.parse.urlparse(url)\n yield parsed._replace(netloc=f'{mobj.group(\"subdomain\")}.substack.com').geturl()\n raise cls.StopExtraction()\n\n def _extract_video_formats(self, video_id, username):\n formats, subtitles = [], {}\n for video_format in ('hls', 'mp4'):\n video_url = f'https://{username}.substack.com/api/v1/video/upload/{video_id}/src?type={video_format}'\n\n if video_format == 'hls':\n fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)\n formats.extend(fmts)\n self._merge_subtitles(subs, target=subtitles)\n else:\n formats.append({\n 'url': video_url,\n 'ext': video_format,\n })\n\n return formats, subtitles\n\n def _real_extract(self, url):\n display_id, username = self._match_valid_url(url).group('id', 'username')\n webpage = self._download_webpage(url, display_id)\n\n 
webpage_info = self._search_json(r'<script[^>]*>\\s*window\\._preloads\\s*=', webpage, 'preloads', display_id)\n\n post_type = webpage_info['post']['type']\n formats, subtitles = [], {}\n if post_type == 'podcast':\n formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}\n elif post_type == 'video':\n formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], username)\n else:\n self.raise_no_formats(f'Page type \"{post_type}\" is not supported')\n\n return {\n 'id': str(webpage_info['post']['id']),\n 'formats': formats,\n 'subtitles': subtitles,\n 'title': traverse_obj(webpage_info, ('post', 'title')),\n 'description': traverse_obj(webpage_info, ('post', 'description')),\n 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),\n 'uploader': traverse_obj(webpage_info, ('pub', 'name')),\n 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),\n }\n", "path": "yt_dlp/extractor/substack.py"}], "after_files": [{"content": "import re\nimport urllib.parse\n\nfrom .common import InfoExtractor\nfrom ..utils import js_to_json, str_or_none, traverse_obj\n\n\nclass SubstackIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<username>[\\w-]+)\\.substack\\.com/p/(?P<id>[\\w-]+)'\n _TESTS = [{\n 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r',\n 'md5': 'f27e4fc6252001d48d479f45e65cdfd5',\n 'info_dict': {\n 'id': '47660949',\n 'ext': 'mp4',\n 'title': 'I MADE A VLOG',\n 'description': 'md5:9248af9a759321e1027226f988f54d96',\n 'thumbnail': 'md5:bec758a34d8ee9142d43bcebdf33af18',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://haleynahman.substack.com/p/-dear-danny-i-found-my-boyfriends?s=r',\n 'md5': '0a63eacec877a1171a62cfa69710fcea',\n 'info_dict': {\n 'id': '51045592',\n 'ext': 'mpga',\n 'title': \"\ud83c\udfa7 Dear Danny: I found my boyfriend's secret Twitter account\",\n 'description': 'md5:a57f2439319e56e0af92dd0c95d75797',\n 'thumbnail': 'md5:daa40b6b79249417c14ff8103db29639',\n 'uploader': 'Maybe Baby',\n 'uploader_id': '33628',\n }\n }, {\n 'url': 'https://andrewzimmern.substack.com/p/mussels-with-black-bean-sauce-recipe',\n 'md5': 'fd3c07077b02444ff0130715b5f632bb',\n 'info_dict': {\n 'id': '47368578',\n 'ext': 'mp4',\n 'title': 'Mussels with Black Bean Sauce: Recipe of the Week #7',\n 'description': 'md5:b96234a2906c7d854d5229818d889515',\n 'thumbnail': 'md5:e30bfaa9da40e82aa62354263a9dd232',\n 'uploader': \"Andrew Zimmern's Spilled Milk \",\n 'uploader_id': '577659',\n }\n }]\n\n @classmethod\n def _extract_embed_urls(cls, url, webpage):\n if not re.search(r'<script[^>]+src=[\"\\']https://substackcdn.com/[^\"\\']+\\.js', webpage):\n return\n\n mobj = re.search(r'{[^}]*[\"\\']subdomain[\"\\']\\s*:\\s*[\"\\'](?P<subdomain>[^\"]+)', webpage)\n if mobj:\n parsed = urllib.parse.urlparse(url)\n yield parsed._replace(netloc=f'{mobj.group(\"subdomain\")}.substack.com').geturl()\n raise cls.StopExtraction()\n\n def _extract_video_formats(self, video_id, username):\n formats, subtitles = [], {}\n for video_format in ('hls', 'mp4'):\n video_url = f'https://{username}.substack.com/api/v1/video/upload/{video_id}/src?type={video_format}'\n\n if video_format == 'hls':\n fmts, subs = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', fatal=False)\n formats.extend(fmts)\n self._merge_subtitles(subs, target=subtitles)\n else:\n formats.append({\n 'url': video_url,\n 'ext': video_format,\n })\n\n return formats, subtitles\n\n def _real_extract(self, url):\n 
display_id, username = self._match_valid_url(url).group('id', 'username')\n webpage = self._download_webpage(url, display_id)\n\n webpage_info = self._parse_json(self._search_json(\n r'window\\._preloads\\s*=\\s*JSON\\.parse\\(', webpage, 'json string',\n display_id, transform_source=js_to_json, contains_pattern=r'\"{(?s:.+)}\"'), display_id)\n\n post_type = webpage_info['post']['type']\n formats, subtitles = [], {}\n if post_type == 'podcast':\n formats, subtitles = [{'url': webpage_info['post']['podcast_url']}], {}\n elif post_type == 'video':\n formats, subtitles = self._extract_video_formats(webpage_info['post']['videoUpload']['id'], username)\n else:\n self.raise_no_formats(f'Page type \"{post_type}\" is not supported')\n\n return {\n 'id': str(webpage_info['post']['id']),\n 'formats': formats,\n 'subtitles': subtitles,\n 'title': traverse_obj(webpage_info, ('post', 'title')),\n 'description': traverse_obj(webpage_info, ('post', 'description')),\n 'thumbnail': traverse_obj(webpage_info, ('post', 'cover_image')),\n 'uploader': traverse_obj(webpage_info, ('pub', 'name')),\n 'uploader_id': str_or_none(traverse_obj(webpage_info, ('post', 'publication_id'))),\n }\n", "path": "yt_dlp/extractor/substack.py"}]} | 2,911 | 454 |
gh_patches_debug_21797 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-664 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
403 attempting to login

It looks like if I open another tab I am logged into my account though.
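
One plausible reading — consistent with the patch shown further down, which marks the login view `csrf_exempt` — is that a cached login form is posting a stale CSRF token. A hypothetical sketch of that direction (names and redirect target are illustrative only):

```python
# Sketch only: exempt the login POST from CSRF checks and short-circuit
# users who already have a session open in another tab.
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt

@method_decorator(csrf_exempt, name='dispatch')
class Login(View):
    def post(self, request):
        if request.user.is_authenticated:
            return redirect('/')
        # ... existing authentication logic ...
```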
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/authentication.py`
Content:
```
1 ''' class views for login/register views '''
2 from django.contrib.auth import authenticate, login, logout
3 from django.contrib.auth.decorators import login_required
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect
6 from django.template.response import TemplateResponse
7 from django.utils import timezone
8 from django.utils.decorators import method_decorator
9 from django.views import View
10
11 from bookwyrm import forms, models
12 from bookwyrm.settings import DOMAIN
13
14
15 # pylint: disable= no-self-use
16 class Login(View):
17 ''' authenticate an existing user '''
18 def get(self, request):
19 ''' login page '''
20 if request.user.is_authenticated:
21 return redirect('/')
22 # sene user to the login page
23 data = {
24 'title': 'Login',
25 'login_form': forms.LoginForm(),
26 'register_form': forms.RegisterForm(),
27 }
28 return TemplateResponse(request, 'login.html', data)
29
30 def post(self, request):
31 ''' authentication action '''
32 login_form = forms.LoginForm(request.POST)
33
34 localname = login_form.data['localname']
35 if '@' in localname: # looks like an email address to me
36 email = localname
37 try:
38 username = models.User.objects.get(email=email)
39 except models.User.DoesNotExist: # maybe it's a full username?
40 username = localname
41 else:
42 username = '%s@%s' % (localname, DOMAIN)
43 password = login_form.data['password']
44 user = authenticate(request, username=username, password=password)
45 if user is not None:
46 # successful login
47 login(request, user)
48 user.last_active_date = timezone.now()
49 user.save(broadcast=False)
50 return redirect(request.GET.get('next', '/'))
51
52 # login errors
53 login_form.non_field_errors = 'Username or password are incorrect'
54 register_form = forms.RegisterForm()
55 data = {
56 'login_form': login_form,
57 'register_form': register_form
58 }
59 return TemplateResponse(request, 'login.html', data)
60
61
62 class Register(View):
63 ''' register a user '''
64 def post(self, request):
65 ''' join the server '''
66 if not models.SiteSettings.get().allow_registration:
67 invite_code = request.POST.get('invite_code')
68
69 if not invite_code:
70 raise PermissionDenied
71
72 invite = get_object_or_404(models.SiteInvite, code=invite_code)
73 if not invite.valid():
74 raise PermissionDenied
75 else:
76 invite = None
77
78 form = forms.RegisterForm(request.POST)
79 errors = False
80 if not form.is_valid():
81 errors = True
82
83 localname = form.data['localname'].strip()
84 email = form.data['email']
85 password = form.data['password']
86
87 # check localname and email uniqueness
88 if models.User.objects.filter(localname=localname).first():
89 form.errors['localname'] = [
90 'User with this username already exists']
91 errors = True
92
93 if errors:
94 data = {
95 'login_form': forms.LoginForm(),
96 'register_form': form,
97 'invite': invite,
98 'valid': invite.valid() if invite else True,
99 }
100 if invite:
101 return TemplateResponse(request, 'invite.html', data)
102 return TemplateResponse(request, 'login.html', data)
103
104 username = '%s@%s' % (localname, DOMAIN)
105 user = models.User.objects.create_user(
106 username, email, password, localname=localname, local=True)
107 if invite:
108 invite.times_used += 1
109 invite.save()
110
111 login(request, user)
112 return redirect('/')
113
114
115 @method_decorator(login_required, name='dispatch')
116 class Logout(View):
117 ''' log out '''
118 def get(self, request):
119 ''' done with this place! outa here! '''
120 logout(request)
121 return redirect('/')
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/authentication.py b/bookwyrm/views/authentication.py
--- a/bookwyrm/views/authentication.py
+++ b/bookwyrm/views/authentication.py
@@ -6,6 +6,7 @@
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.decorators import method_decorator
+from django.views.decorators.csrf import csrf_exempt
from django.views import View
from bookwyrm import forms, models
@@ -13,6 +14,7 @@
# pylint: disable= no-self-use
+@method_decorator(csrf_exempt, name='dispatch')
class Login(View):
''' authenticate an existing user '''
def get(self, request):
@@ -29,6 +31,8 @@
def post(self, request):
''' authentication action '''
+ if request.user.is_authenticated:
+ return redirect('/')
login_form = forms.LoginForm(request.POST)
localname = login_form.data['localname']
| {"golden_diff": "diff --git a/bookwyrm/views/authentication.py b/bookwyrm/views/authentication.py\n--- a/bookwyrm/views/authentication.py\n+++ b/bookwyrm/views/authentication.py\n@@ -6,6 +6,7 @@\n from django.template.response import TemplateResponse\n from django.utils import timezone\n from django.utils.decorators import method_decorator\n+from django.views.decorators.csrf import csrf_exempt\n from django.views import View\n \n from bookwyrm import forms, models\n@@ -13,6 +14,7 @@\n \n \n # pylint: disable= no-self-use\n+@method_decorator(csrf_exempt, name='dispatch')\n class Login(View):\n ''' authenticate an existing user '''\n def get(self, request):\n@@ -29,6 +31,8 @@\n \n def post(self, request):\n ''' authentication action '''\n+ if request.user.is_authenticated:\n+ return redirect('/')\n login_form = forms.LoginForm(request.POST)\n \n localname = login_form.data['localname']\n", "issue": "403 attempting to login\n\r\n\r\nIt looks like if I open another tab I am logged into my account though. \n", "before_files": [{"content": "''' class views for login/register views '''\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import DOMAIN\n\n\n# pylint: disable= no-self-use\nclass Login(View):\n ''' authenticate an existing user '''\n def get(self, request):\n ''' login page '''\n if request.user.is_authenticated:\n return redirect('/')\n # sene user to the login page\n data = {\n 'title': 'Login',\n 'login_form': forms.LoginForm(),\n 'register_form': forms.RegisterForm(),\n }\n return TemplateResponse(request, 'login.html', data)\n\n def post(self, request):\n ''' authentication action '''\n login_form = forms.LoginForm(request.POST)\n\n localname = login_form.data['localname']\n if '@' in localname: # looks like an email address to me\n email = localname\n try:\n username = models.User.objects.get(email=email)\n except models.User.DoesNotExist: # maybe it's a full username?\n username = localname\n else:\n username = '%s@%s' % (localname, DOMAIN)\n password = login_form.data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n # successful login\n login(request, user)\n user.last_active_date = timezone.now()\n user.save(broadcast=False)\n return redirect(request.GET.get('next', '/'))\n\n # login errors\n login_form.non_field_errors = 'Username or password are incorrect'\n register_form = forms.RegisterForm()\n data = {\n 'login_form': login_form,\n 'register_form': register_form\n }\n return TemplateResponse(request, 'login.html', data)\n\n\nclass Register(View):\n ''' register a user '''\n def post(self, request):\n ''' join the server '''\n if not models.SiteSettings.get().allow_registration:\n invite_code = request.POST.get('invite_code')\n\n if not invite_code:\n raise PermissionDenied\n\n invite = get_object_or_404(models.SiteInvite, code=invite_code)\n if not invite.valid():\n raise PermissionDenied\n else:\n invite = None\n\n form = forms.RegisterForm(request.POST)\n errors = False\n if not form.is_valid():\n errors = True\n\n localname = form.data['localname'].strip()\n email = form.data['email']\n password = 
form.data['password']\n\n # check localname and email uniqueness\n if models.User.objects.filter(localname=localname).first():\n form.errors['localname'] = [\n 'User with this username already exists']\n errors = True\n\n if errors:\n data = {\n 'login_form': forms.LoginForm(),\n 'register_form': form,\n 'invite': invite,\n 'valid': invite.valid() if invite else True,\n }\n if invite:\n return TemplateResponse(request, 'invite.html', data)\n return TemplateResponse(request, 'login.html', data)\n\n username = '%s@%s' % (localname, DOMAIN)\n user = models.User.objects.create_user(\n username, email, password, localname=localname, local=True)\n if invite:\n invite.times_used += 1\n invite.save()\n\n login(request, user)\n return redirect('/')\n\n\n@method_decorator(login_required, name='dispatch')\nclass Logout(View):\n ''' log out '''\n def get(self, request):\n ''' done with this place! outa here! '''\n logout(request)\n return redirect('/')\n", "path": "bookwyrm/views/authentication.py"}], "after_files": [{"content": "''' class views for login/register views '''\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import DOMAIN\n\n\n# pylint: disable= no-self-use\n@method_decorator(csrf_exempt, name='dispatch')\nclass Login(View):\n ''' authenticate an existing user '''\n def get(self, request):\n ''' login page '''\n if request.user.is_authenticated:\n return redirect('/')\n # sene user to the login page\n data = {\n 'title': 'Login',\n 'login_form': forms.LoginForm(),\n 'register_form': forms.RegisterForm(),\n }\n return TemplateResponse(request, 'login.html', data)\n\n def post(self, request):\n ''' authentication action '''\n if request.user.is_authenticated:\n return redirect('/')\n login_form = forms.LoginForm(request.POST)\n\n localname = login_form.data['localname']\n if '@' in localname: # looks like an email address to me\n email = localname\n try:\n username = models.User.objects.get(email=email)\n except models.User.DoesNotExist: # maybe it's a full username?\n username = localname\n else:\n username = '%s@%s' % (localname, DOMAIN)\n password = login_form.data['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n # successful login\n login(request, user)\n user.last_active_date = timezone.now()\n user.save(broadcast=False)\n return redirect(request.GET.get('next', '/'))\n\n # login errors\n login_form.non_field_errors = 'Username or password are incorrect'\n register_form = forms.RegisterForm()\n data = {\n 'login_form': login_form,\n 'register_form': register_form\n }\n return TemplateResponse(request, 'login.html', data)\n\n\nclass Register(View):\n ''' register a user '''\n def post(self, request):\n ''' join the server '''\n if not models.SiteSettings.get().allow_registration:\n invite_code = request.POST.get('invite_code')\n\n if not invite_code:\n raise PermissionDenied\n\n invite = get_object_or_404(models.SiteInvite, code=invite_code)\n if not invite.valid():\n raise PermissionDenied\n else:\n invite = None\n\n form = forms.RegisterForm(request.POST)\n errors 
= False\n if not form.is_valid():\n errors = True\n\n localname = form.data['localname'].strip()\n email = form.data['email']\n password = form.data['password']\n\n # check localname and email uniqueness\n if models.User.objects.filter(localname=localname).first():\n form.errors['localname'] = [\n 'User with this username already exists']\n errors = True\n\n if errors:\n data = {\n 'login_form': forms.LoginForm(),\n 'register_form': form,\n 'invite': invite,\n 'valid': invite.valid() if invite else True,\n }\n if invite:\n return TemplateResponse(request, 'invite.html', data)\n return TemplateResponse(request, 'login.html', data)\n\n username = '%s@%s' % (localname, DOMAIN)\n user = models.User.objects.create_user(\n username, email, password, localname=localname, local=True)\n if invite:\n invite.times_used += 1\n invite.save()\n\n login(request, user)\n return redirect('/')\n\n\n@method_decorator(login_required, name='dispatch')\nclass Logout(View):\n ''' log out '''\n def get(self, request):\n ''' done with this place! outa here! '''\n logout(request)\n return redirect('/')\n", "path": "bookwyrm/views/authentication.py"}]} | 1,454 | 205 |
gh_patches_debug_18116 | rasdani/github-patches | git_diff | beeware__toga-2139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Android Read-only Multi-line text input displays suggestions (Spell Checking)
### Describe the bug
When using the "MultilineTextInput" with the "readonly" parameter set to "True" on Android, it will suggest spelling improvements based on dictionary candidates. This behavior results in underlined text on android devices when using a language other than the keyboard language or with unknown words in general.
### Steps to reproduce
1. Use MultilineTextInput in readonly with a set text
2. Compile for android and set the KeyBoard language to a different one than the text.
3. The suggestions should appear, underlining the Text.
### Expected behavior
Suggestions do not show up in MultilineTextInput if set to readonly.
### Screenshots
_No response_
### Environment
- Operating System: Fedora 38 Workstation / Lineage OS 20.0
- Python version: 3.11.5
- Software versions:
- Briefcase: 0.3.15
- Toga: 0.3.1
[tool.briefcase.app.trinker.android]
requires = [
#"toga-android~=0.3.1"
'git+https://github.com/beeware/toga.git#subdirectory=core',
'git+https://github.com/beeware/toga.git#subdirectory=android',
]
### Logs
```
```
### Additional context
I did some research on the topic and while I'm not really an Android dev, I think I found the problem. In "android/src/toga_android/widgets/textinput.py" we need to set the flag [TYPE_TEXT_FLAG_NO_SUGGESTIONS](https://developer.android.com/reference/android/text/InputType#TYPE_TEXT_FLAG_NO_SUGGESTIONS) if "readonly" is true.
As said before, I'm not really an Android dev and am not sure how this would be implemented.
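
For illustration only, a minimal sketch of that idea in the widget's existing style — toggling the flag on the current input type, which is essentially what the patch further down does:

```python
# Sketch: suppress spell-check suggestions while the widget is read-only.
def set_readonly(self, readonly):
    if readonly:
        self.native.setFocusable(False)  # implicitly disables touch-mode focus
        self.native.setInputType(
            self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS)
    else:
        self.native.setFocusableInTouchMode(True)
        self.native.setInputType(
            self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS)
```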
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `android/src/toga_android/widgets/textinput.py`
Content:
```
1 from java import dynamic_proxy
2 from travertino.size import at_least
3
4 from android.text import InputType, TextWatcher
5 from android.view import Gravity, View
6 from android.widget import EditText
7 from toga_android.keys import toga_key
8
9 from .label import TextViewWidget
10
11
12 class TogaTextWatcher(dynamic_proxy(TextWatcher)):
13 def __init__(self, impl):
14 super().__init__()
15 self.impl = impl
16
17 def beforeTextChanged(self, _charSequence, _start, _count, _after):
18 pass
19
20 def afterTextChanged(self, _editable):
21 self.impl._on_change()
22
23 def onTextChanged(self, _charSequence, _start, _before, _count):
24 pass
25
26
27 class TogaKeyListener(dynamic_proxy(View.OnKeyListener)):
28 def __init__(self, impl):
29 super().__init__()
30 self.impl = impl
31
32 def onKey(self, _view, _key, _event):
33 event_info = toga_key(_event)
34 if event_info is None:
35 pass # pragma: nocover
36 else:
37 key_pressed = event_info["key"].value
38 if (key_pressed == "<enter>" or key_pressed == "numpad:enter") and (
39 int(_event.getAction()) == 1
40 ):
41 self.impl._on_confirm()
42 return False
43
44
45 class TogaFocusListener(dynamic_proxy(View.OnFocusChangeListener)):
46 def __init__(self, impl):
47 super().__init__()
48 self.impl = impl
49
50 def onFocusChange(self, view, has_focus):
51 if has_focus:
52 self.impl._on_gain_focus()
53 else:
54 self.impl._on_lose_focus()
55
56
57 class TextInput(TextViewWidget):
58 def create(self, input_type=InputType.TYPE_CLASS_TEXT):
59 self.native = EditText(self._native_activity)
60 self.native.setInputType(input_type)
61 self.cache_textview_defaults()
62
63 self.native.addTextChangedListener(TogaTextWatcher(self))
64 self.native.setOnKeyListener(TogaKeyListener(self))
65 self.native.setOnFocusChangeListener(TogaFocusListener(self))
66
67 def get_value(self):
68 return str(self.native.getText())
69
70 def set_value(self, value):
71 self.native.setText(value)
72
73 def get_readonly(self):
74 return not self.native.isFocusable()
75
76 def set_readonly(self, readonly):
77 if readonly:
78 # Implicitly calls setFocusableInTouchMode(False)
79 self.native.setFocusable(False)
80 else:
81 # Implicitly calls setFocusable(True)
82 self.native.setFocusableInTouchMode(True)
83
84 def get_placeholder(self):
85 return str(self.native.getHint())
86
87 def set_placeholder(self, value):
88 self.native.setHint(value)
89
90 def set_alignment(self, value):
91 self.set_textview_alignment(value, Gravity.CENTER_VERTICAL)
92
93 def set_error(self, error_message):
94 self.native.setError(error_message)
95
96 def clear_error(self):
97 self.native.setError(None)
98
99 def is_valid(self):
100 return self.native.getError() is None
101
102 def _on_change(self):
103 self.interface.on_change(None)
104 self.interface._validate()
105
106 def _on_confirm(self):
107 self.interface.on_confirm(None)
108
109 def _on_gain_focus(self):
110 self.interface.on_gain_focus(None)
111
112 def _on_lose_focus(self):
113 self.interface.on_lose_focus(None)
114
115 def rehint(self):
116 self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)
117 self.native.measure(View.MeasureSpec.UNSPECIFIED, View.MeasureSpec.UNSPECIFIED)
118 self.interface.intrinsic.height = self.native.getMeasuredHeight()
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/android/src/toga_android/widgets/textinput.py b/android/src/toga_android/widgets/textinput.py
--- a/android/src/toga_android/widgets/textinput.py
+++ b/android/src/toga_android/widgets/textinput.py
@@ -77,9 +77,19 @@
if readonly:
# Implicitly calls setFocusableInTouchMode(False)
self.native.setFocusable(False)
+ # Add TYPE_TEXT_FLAG_NO_SUGGESTIONS to the input type to disable suggestions
+ input_type = (
+ self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS
+ )
+ self.native.setInputType(input_type)
else:
# Implicitly calls setFocusable(True)
self.native.setFocusableInTouchMode(True)
+ # Remove TYPE_TEXT_FLAG_NO_SUGGESTIONS to enable suggestions
+ input_type = (
+ self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS
+ )
+ self.native.setInputType(input_type)
def get_placeholder(self):
return str(self.native.getHint())
| {"golden_diff": "diff --git a/android/src/toga_android/widgets/textinput.py b/android/src/toga_android/widgets/textinput.py\n--- a/android/src/toga_android/widgets/textinput.py\n+++ b/android/src/toga_android/widgets/textinput.py\n@@ -77,9 +77,19 @@\n if readonly:\n # Implicitly calls setFocusableInTouchMode(False)\n self.native.setFocusable(False)\n+ # Add TYPE_TEXT_FLAG_NO_SUGGESTIONS to the input type to disable suggestions\n+ input_type = (\n+ self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n+ )\n+ self.native.setInputType(input_type)\n else:\n # Implicitly calls setFocusable(True)\n self.native.setFocusableInTouchMode(True)\n+ # Remove TYPE_TEXT_FLAG_NO_SUGGESTIONS to enable suggestions\n+ input_type = (\n+ self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n+ )\n+ self.native.setInputType(input_type)\n \n def get_placeholder(self):\n return str(self.native.getHint())\n", "issue": "Android Read-only Multi-line text input displays suggestions (Spell Checking)\n### Describe the bug\n\nWhen using the \"MultilineTextInput\" with the \"readonly\" parameter set to \"True\" on Android, it will suggest spelling improvements based on dictionary candidates. This behavior results in underlined text on android devices when using a language other than the keyboard language or with unknown words in general.\n\n### Steps to reproduce\n\n1. Use MultilineTextInput in readonly with a set text\r\n2. Compile for android and set the KeyBoard language to a different one than the text.\r\n3. The suggestions should appear, underlining the Text.\n\n### Expected behavior\n\nSuggestions do not show up in MultilineTextInput if set to readonly.\n\n### Screenshots\n\n_No response_\n\n### Environment\n\n- Operating System: Fedora 38 Workstation / Lineage OS 20.0\r\n- Python version: 3.11.5\r\n- Software versions:\r\n - Briefcase: 0.3.15\r\n - Toga: 0.3.1 \r\n\r\n[tool.briefcase.app.trinker.android]\r\nrequires = [\r\n #\"toga-android~=0.3.1\"\r\n 'git+https://github.com/beeware/toga.git#subdirectory=core',\r\n 'git+https://github.com/beeware/toga.git#subdirectory=android',\r\n]\r\n\r\n\n\n### Logs\n\n```\r\n\r\n```\r\n\n\n### Additional context\n\nI did some research on the topic and while I'm not really an Android dev, I think I found the problem. 
In \"android/src/toga_android/widgets/textinput.py\" we need to set the flag [TYPE_TEXT_FLAG_NO_SUGGESTIONS](https://developer.android.com/reference/android/text/InputType#TYPE_TEXT_FLAG_NO_SUGGESTIONS) if \"readonly\" is true.\r\n\r\nAs said before, I'm not really an Android dev and am not sure how this would be implemented.\r\n\n", "before_files": [{"content": "from java import dynamic_proxy\nfrom travertino.size import at_least\n\nfrom android.text import InputType, TextWatcher\nfrom android.view import Gravity, View\nfrom android.widget import EditText\nfrom toga_android.keys import toga_key\n\nfrom .label import TextViewWidget\n\n\nclass TogaTextWatcher(dynamic_proxy(TextWatcher)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def beforeTextChanged(self, _charSequence, _start, _count, _after):\n pass\n\n def afterTextChanged(self, _editable):\n self.impl._on_change()\n\n def onTextChanged(self, _charSequence, _start, _before, _count):\n pass\n\n\nclass TogaKeyListener(dynamic_proxy(View.OnKeyListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onKey(self, _view, _key, _event):\n event_info = toga_key(_event)\n if event_info is None:\n pass # pragma: nocover\n else:\n key_pressed = event_info[\"key\"].value\n if (key_pressed == \"<enter>\" or key_pressed == \"numpad:enter\") and (\n int(_event.getAction()) == 1\n ):\n self.impl._on_confirm()\n return False\n\n\nclass TogaFocusListener(dynamic_proxy(View.OnFocusChangeListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onFocusChange(self, view, has_focus):\n if has_focus:\n self.impl._on_gain_focus()\n else:\n self.impl._on_lose_focus()\n\n\nclass TextInput(TextViewWidget):\n def create(self, input_type=InputType.TYPE_CLASS_TEXT):\n self.native = EditText(self._native_activity)\n self.native.setInputType(input_type)\n self.cache_textview_defaults()\n\n self.native.addTextChangedListener(TogaTextWatcher(self))\n self.native.setOnKeyListener(TogaKeyListener(self))\n self.native.setOnFocusChangeListener(TogaFocusListener(self))\n\n def get_value(self):\n return str(self.native.getText())\n\n def set_value(self, value):\n self.native.setText(value)\n\n def get_readonly(self):\n return not self.native.isFocusable()\n\n def set_readonly(self, readonly):\n if readonly:\n # Implicitly calls setFocusableInTouchMode(False)\n self.native.setFocusable(False)\n else:\n # Implicitly calls setFocusable(True)\n self.native.setFocusableInTouchMode(True)\n\n def get_placeholder(self):\n return str(self.native.getHint())\n\n def set_placeholder(self, value):\n self.native.setHint(value)\n\n def set_alignment(self, value):\n self.set_textview_alignment(value, Gravity.CENTER_VERTICAL)\n\n def set_error(self, error_message):\n self.native.setError(error_message)\n\n def clear_error(self):\n self.native.setError(None)\n\n def is_valid(self):\n return self.native.getError() is None\n\n def _on_change(self):\n self.interface.on_change(None)\n self.interface._validate()\n\n def _on_confirm(self):\n self.interface.on_confirm(None)\n\n def _on_gain_focus(self):\n self.interface.on_gain_focus(None)\n\n def _on_lose_focus(self):\n self.interface.on_lose_focus(None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)\n self.native.measure(View.MeasureSpec.UNSPECIFIED, View.MeasureSpec.UNSPECIFIED)\n self.interface.intrinsic.height = self.native.getMeasuredHeight()\n", "path": "android/src/toga_android/widgets/textinput.py"}], 
"after_files": [{"content": "from java import dynamic_proxy\nfrom travertino.size import at_least\n\nfrom android.text import InputType, TextWatcher\nfrom android.view import Gravity, View\nfrom android.widget import EditText\nfrom toga_android.keys import toga_key\n\nfrom .label import TextViewWidget\n\n\nclass TogaTextWatcher(dynamic_proxy(TextWatcher)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def beforeTextChanged(self, _charSequence, _start, _count, _after):\n pass\n\n def afterTextChanged(self, _editable):\n self.impl._on_change()\n\n def onTextChanged(self, _charSequence, _start, _before, _count):\n pass\n\n\nclass TogaKeyListener(dynamic_proxy(View.OnKeyListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onKey(self, _view, _key, _event):\n event_info = toga_key(_event)\n if event_info is None:\n pass # pragma: nocover\n else:\n key_pressed = event_info[\"key\"].value\n if (key_pressed == \"<enter>\" or key_pressed == \"numpad:enter\") and (\n int(_event.getAction()) == 1\n ):\n self.impl._on_confirm()\n return False\n\n\nclass TogaFocusListener(dynamic_proxy(View.OnFocusChangeListener)):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onFocusChange(self, view, has_focus):\n if has_focus:\n self.impl._on_gain_focus()\n else:\n self.impl._on_lose_focus()\n\n\nclass TextInput(TextViewWidget):\n def create(self, input_type=InputType.TYPE_CLASS_TEXT):\n self.native = EditText(self._native_activity)\n self.native.setInputType(input_type)\n self.cache_textview_defaults()\n\n self.native.addTextChangedListener(TogaTextWatcher(self))\n self.native.setOnKeyListener(TogaKeyListener(self))\n self.native.setOnFocusChangeListener(TogaFocusListener(self))\n\n def get_value(self):\n return str(self.native.getText())\n\n def set_value(self, value):\n self.native.setText(value)\n\n def get_readonly(self):\n return not self.native.isFocusable()\n\n def set_readonly(self, readonly):\n if readonly:\n # Implicitly calls setFocusableInTouchMode(False)\n self.native.setFocusable(False)\n # Add TYPE_TEXT_FLAG_NO_SUGGESTIONS to the input type to disable suggestions\n input_type = (\n self.native.getInputType() | InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n )\n self.native.setInputType(input_type)\n else:\n # Implicitly calls setFocusable(True)\n self.native.setFocusableInTouchMode(True)\n # Remove TYPE_TEXT_FLAG_NO_SUGGESTIONS to enable suggestions\n input_type = (\n self.native.getInputType() & ~InputType.TYPE_TEXT_FLAG_NO_SUGGESTIONS\n )\n self.native.setInputType(input_type)\n\n def get_placeholder(self):\n return str(self.native.getHint())\n\n def set_placeholder(self, value):\n self.native.setHint(value)\n\n def set_alignment(self, value):\n self.set_textview_alignment(value, Gravity.CENTER_VERTICAL)\n\n def set_error(self, error_message):\n self.native.setError(error_message)\n\n def clear_error(self):\n self.native.setError(None)\n\n def is_valid(self):\n return self.native.getError() is None\n\n def _on_change(self):\n self.interface.on_change(None)\n self.interface._validate()\n\n def _on_confirm(self):\n self.interface.on_confirm(None)\n\n def _on_gain_focus(self):\n self.interface.on_gain_focus(None)\n\n def _on_lose_focus(self):\n self.interface.on_lose_focus(None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)\n self.native.measure(View.MeasureSpec.UNSPECIFIED, View.MeasureSpec.UNSPECIFIED)\n self.interface.intrinsic.height = 
self.native.getMeasuredHeight()\n", "path": "android/src/toga_android/widgets/textinput.py"}]} | 1,670 | 234 |
gh_patches_debug_14420 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
XDG_DATA_DIRS environment variable being overridden when running packaged app
I have a simple app that runs a local Flask web server and opens a web browser to show it on start up using the following command:
webbrowser.open('http://localhost:%d' % port, new=2, autoraise=True)
When I run it in development mode, this works fine. However, when it's packaged using pyinstaller and run as an executable, I get this:
gvfs-open: http://localhost:5000: error opening location: No application is registered as handling this file
The order of browsers that the webbrowser module will try is the same in both instances:
'xdg-open', 'gvfs-open', 'gnome-open', 'x-www-browser', 'firefox'
I tried using specific commands using `webbrowser.get(NAME).open` but none worked (except Firefox).
It seems like the XDG_DATA_DIRS environment variable is being overridden:
`print('XDG_DATA_DIRS: ' + os.environ.get('XDG_DATA_DIRS', 'not found'))`
yields
`/usr/share/ubuntu:/usr/share/gnome:/usr/local/share:/usr/share:/var/lib/snapd/desktop:/var/lib/snapd/desktop`
in development mode and
`SOURCE_DIR/dist/linux/app/share`
in the packaged executable.
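
If the bundled runtime hook is replacing `XDG_DATA_DIRS` outright, a gentler sketch — and the direction the patch further down takes — is to prepend the bundle's `share` directory rather than overwrite the variable:

```python
# Sketch only: keep the system's XDG_DATA_DIRS and prepend the frozen app's own share/.
# sys._MEIPASS exists only inside a PyInstaller-frozen process.
import os
import sys

pyi_data_dir = os.path.join(sys._MEIPASS, 'share')
xdg = os.environ.get('XDG_DATA_DIRS')
os.environ['XDG_DATA_DIRS'] = pyi_data_dir if not xdg else pyi_data_dir + os.pathsep + xdg
```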
I'm using Python 3.5.2 and pyinstaller 3.3.1 on Ubuntu 16.04 with the command
pyinstaller --add-data="static:static" app.py
Stack overflow link: https://stackoverflow.com/questions/51657864/pyinstaller-webbrowser-open-doesnt-work-in-packaged-app
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/rthooks/pyi_rth_glib.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2015-2022, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 import os
13 import sys
14
15 os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/rthooks/pyi_rth_glib.py b/PyInstaller/hooks/rthooks/pyi_rth_glib.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_glib.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_glib.py
@@ -12,4 +12,20 @@
import os
import sys
-os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')
+# Prepend the frozen application's data dir to XDG_DATA_DIRS. We need to avoid overwriting the existing paths in order
+# to allow the frozen application to run system-installed applications (for example, launch a web browser via the
+# webbrowser module on Linux). Should the user desire complete isolation of the frozen application from the system,
+# they need to clean up XDG_DATA_DIRS at the start of their program (i.e., remove all entries but first).
+pyi_data_dir = os.path.join(sys._MEIPASS, 'share')
+
+xdg_data_dirs = os.environ.get('XDG_DATA_DIRS', None)
+if xdg_data_dirs:
+ if pyi_data_dir not in xdg_data_dirs:
+ xdg_data_dirs = pyi_data_dir + os.pathsep + xdg_data_dirs
+else:
+ xdg_data_dirs = pyi_data_dir
+os.environ['XDG_DATA_DIRS'] = xdg_data_dirs
+
+# Cleanup aux variables
+del xdg_data_dirs
+del pyi_data_dir
| {"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_glib.py b/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_glib.py\n@@ -12,4 +12,20 @@\n import os\n import sys\n \n-os.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')\n+# Prepend the frozen application's data dir to XDG_DATA_DIRS. We need to avoid overwriting the existing paths in order\n+# to allow the frozen application to run system-installed applications (for example, launch a web browser via the\n+# webbrowser module on Linux). Should the user desire complete isolation of the frozen application from the system,\n+# they need to clean up XDG_DATA_DIRS at the start of their program (i.e., remove all entries but first).\n+pyi_data_dir = os.path.join(sys._MEIPASS, 'share')\n+\n+xdg_data_dirs = os.environ.get('XDG_DATA_DIRS', None)\n+if xdg_data_dirs:\n+ if pyi_data_dir not in xdg_data_dirs:\n+ xdg_data_dirs = pyi_data_dir + os.pathsep + xdg_data_dirs\n+else:\n+ xdg_data_dirs = pyi_data_dir\n+os.environ['XDG_DATA_DIRS'] = xdg_data_dirs\n+\n+# Cleanup aux variables\n+del xdg_data_dirs\n+del pyi_data_dir\n", "issue": "XDG_DATA_DIRS environment variable being overriden when running packaged app\nI have a simple app that runs a local Flask web server and opens a web browser to show it on start up using the following command: \r\n\r\n webbrowser.open('http://localhost:%d' % port, new=2, autoraise=True)\r\n\r\nWhen I run it in development mode, this works fine. However, when it's packaged using pyinstaller and run as an executable, I get this:\r\n\r\n gvfs-open: http://localhost:5000: error opening location: No application is registered as handling this file\r\n\r\nThe order of the browser's web browser will try is the same in both instances: \r\n\r\n 'xdg-open', 'gvfs-open', 'gnome-open', 'x-www-browser', 'firefox'\r\n\r\nI tried using specific commands using `webbrowser.get(NAME).open` but none worked (except Firefox). \r\n\r\nIt seems like the XDG_DATA_DIRS environment variable is being over ridden: \r\n\r\n`print('XDG_DATA_DIRS: ' + os.environ.get('XDG_DATA_DIRS', 'not found'))` \r\n\r\nyields \r\n\r\n`/usr/share/ubuntu:/usr/share/gnome:/usr/local/share:/usr/share:/var/lib/snapd/desktop:/var/lib/snapd/desktop` \r\n\r\nin development mode and \r\n\r\n`SOURCE_DIR/dist/linux/app/share`\r\n\r\nin the packaged executable. 
\r\n\r\nI'm using Python 3.5.2 and pyinstaller 3.3.1 on Ubuntu 16.04 with the command \r\n\r\n pyinstaller --add-data=\"static:static\" app.py\r\n\r\nStack overflow link: https://stackoverflow.com/questions/51657864/pyinstaller-webbrowser-open-doesnt-work-in-packaged-app\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\nimport os\nimport sys\n\nos.environ['XDG_DATA_DIRS'] = os.path.join(sys._MEIPASS, 'share')\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_glib.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2015-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\nimport os\nimport sys\n\n# Prepend the frozen application's data dir to XDG_DATA_DIRS. We need to avoid overwriting the existing paths in order\n# to allow the frozen application to run system-installed applications (for example, launch a web browser via the\n# webbrowser module on Linux). Should the user desire complete isolation of the frozen application from the system,\n# they need to clean up XDG_DATA_DIRS at the start of their program (i.e., remove all entries but first).\npyi_data_dir = os.path.join(sys._MEIPASS, 'share')\n\nxdg_data_dirs = os.environ.get('XDG_DATA_DIRS', None)\nif xdg_data_dirs:\n if pyi_data_dir not in xdg_data_dirs:\n xdg_data_dirs = pyi_data_dir + os.pathsep + xdg_data_dirs\nelse:\n xdg_data_dirs = pyi_data_dir\nos.environ['XDG_DATA_DIRS'] = xdg_data_dirs\n\n# Cleanup aux variables\ndel xdg_data_dirs\ndel pyi_data_dir\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_glib.py"}]} | 769 | 333 |
gh_patches_debug_28621 | rasdani/github-patches | git_diff | conan-io__conan-center-index-20413 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cocoyaxi: make deprecated
Specify library name and version: **cocoyaxi/***
---
- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).
- [ ] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).
- [ ] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/cocoyaxi/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import check_min_cppstd
4 from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
5 from conan.tools.files import copy, get
6 from conan.tools.microsoft import is_msvc, is_msvc_static_runtime
7 import os
8
9 required_conan_version = ">=1.50.0"
10
11
12 class CocoyaxiConan(ConanFile):
13 name = "cocoyaxi"
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://github.com/idealvin/cocoyaxi"
16 license = "MIT"
17 description = "A go-style coroutine library in C++11 and more."
18 topics = ("cocoyaxi", "coroutine", "c++11")
19
20 settings = "os", "arch", "compiler", "build_type"
21 options = {
22 "shared": [True, False],
23 "fPIC": [True, False],
24 "with_libcurl": [True, False],
25 "with_openssl": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 "with_libcurl": False,
31 "with_openssl": False,
32 }
33
34 def config_options(self):
35 if self.settings.os == "Windows":
36 del self.options.fPIC
37
38 def configure(self):
39 if self.options.shared:
40 del self.options.fPIC
41
42 def requirements(self):
43 if self.options.with_libcurl:
44 self.requires("libcurl/7.80.0")
45 if self.options.with_libcurl or self.options.with_openssl:
46 self.requires("openssl/1.1.1q")
47
48 def validate(self):
49 if self.info.settings.compiler.cppstd:
50 check_min_cppstd(self, 11)
51 if self.info.options.with_libcurl:
52 if not self.info.options.with_openssl:
53 raise ConanInvalidConfiguration(f"{self.name} requires with_openssl=True when using with_libcurl=True")
54 if self.dependencies["libcurl"].options.with_ssl != "openssl":
55 raise ConanInvalidConfiguration(f"{self.name} requires libcurl:with_ssl='openssl' to be enabled")
56 if not self.dependencies["libcurl"].options.with_zlib:
57 raise ConanInvalidConfiguration(f"{self.name} requires libcurl:with_zlib=True to be enabled")
58
59 def layout(self):
60 cmake_layout(self, src_folder="src")
61
62 def source(self):
63 get(self, **self.conan_data["sources"][self.version],
64 destination=self.source_folder, strip_root=True)
65
66 def generate(self):
67 tc = CMakeToolchain(self)
68 if is_msvc(self):
69 tc.variables["STATIC_VS_CRT"] = is_msvc_static_runtime(self)
70 tc.variables["WITH_LIBCURL"] = self.options.with_libcurl
71 tc.variables["WITH_OPENSSL"] = self.options.with_openssl
72 tc.generate()
73 cd = CMakeDeps(self)
74 cd.generate()
75
76 def build(self):
77 cmake = CMake(self)
78 cmake.configure()
79 cmake.build()
80
81 def package(self):
82 copy(self, "LICENSE.md", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
83 cmake = CMake(self)
84 cmake.install()
85
86 def package_info(self):
87 self.cpp_info.set_property("cmake_file_name", "cocoyaxi")
88 self.cpp_info.set_property("cmake_target_name", "cocoyaxi::co")
89 # TODO: back to global scope in conan v2 once legacy generators removed
90 self.cpp_info.components["co"].libs = ["co"]
91
92 # TODO: to remove in conan v2 once legacy generators removed
93 self.cpp_info.components["co"].set_property("cmake_target_name", "cocoyaxi::co")
94 if self.options.with_libcurl:
95 self.cpp_info.components["co"].requires.append("libcurl::libcurl")
96 if self.options.with_libcurl or self.options.with_openssl:
97 self.cpp_info.components["co"].requires.append("openssl::openssl")
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/cocoyaxi/all/conanfile.py b/recipes/cocoyaxi/all/conanfile.py
--- a/recipes/cocoyaxi/all/conanfile.py
+++ b/recipes/cocoyaxi/all/conanfile.py
@@ -30,6 +30,7 @@
"with_libcurl": False,
"with_openssl": False,
}
+ deprecated = "coost"
def config_options(self):
if self.settings.os == "Windows":
@@ -37,13 +38,13 @@
def configure(self):
if self.options.shared:
- del self.options.fPIC
+ self.options.rm_safe("fPIC")
def requirements(self):
if self.options.with_libcurl:
- self.requires("libcurl/7.80.0")
+ self.requires("libcurl/8.2.1")
if self.options.with_libcurl or self.options.with_openssl:
- self.requires("openssl/1.1.1q")
+ self.requires("openssl/[>=1.1 <4]")
def validate(self):
if self.info.settings.compiler.cppstd:
@@ -95,3 +96,7 @@
self.cpp_info.components["co"].requires.append("libcurl::libcurl")
if self.options.with_libcurl or self.options.with_openssl:
self.cpp_info.components["co"].requires.append("openssl::openssl")
+ if self.settings.os in ["Linux", "FreeBSD"]:
+ self.cpp_info.components["co"].system_libs.extend(["pthread", "dl", "m"])
+ elif self.settings.os == "Windows":
+ self.cpp_info.components["co"].system_libs.append("ws2_32")
| {"golden_diff": "diff --git a/recipes/cocoyaxi/all/conanfile.py b/recipes/cocoyaxi/all/conanfile.py\n--- a/recipes/cocoyaxi/all/conanfile.py\n+++ b/recipes/cocoyaxi/all/conanfile.py\n@@ -30,6 +30,7 @@\n \"with_libcurl\": False,\n \"with_openssl\": False,\n }\n+ deprecated = \"coost\"\n \n def config_options(self):\n if self.settings.os == \"Windows\":\n@@ -37,13 +38,13 @@\n \n def configure(self):\n if self.options.shared:\n- del self.options.fPIC\n+ self.options.rm_safe(\"fPIC\")\n \n def requirements(self):\n if self.options.with_libcurl:\n- self.requires(\"libcurl/7.80.0\")\n+ self.requires(\"libcurl/8.2.1\")\n if self.options.with_libcurl or self.options.with_openssl:\n- self.requires(\"openssl/1.1.1q\")\n+ self.requires(\"openssl/[>=1.1 <4]\")\n \n def validate(self):\n if self.info.settings.compiler.cppstd:\n@@ -95,3 +96,7 @@\n self.cpp_info.components[\"co\"].requires.append(\"libcurl::libcurl\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.cpp_info.components[\"co\"].requires.append(\"openssl::openssl\")\n+ if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n+ self.cpp_info.components[\"co\"].system_libs.extend([\"pthread\", \"dl\", \"m\"])\n+ elif self.settings.os == \"Windows\":\n+ self.cpp_info.components[\"co\"].system_libs.append(\"ws2_32\")\n", "issue": "cocoyaxi: make deprecated\nSpecify library name and version: **cocoyaxi/***\r\n\r\n---\r\n\r\n- [x] I've read the [contributing guidelines](https://github.com/conan-io/conan-center-index/blob/master/CONTRIBUTING.md).\r\n- [ ] I've used a [recent](https://github.com/conan-io/conan/releases/latest) Conan client version close to the [currently deployed](https://github.com/conan-io/conan-center-index/blob/master/.c3i/config_v1.yml#L6).\r\n- [ ] I've tried at least one configuration locally with the [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass CocoyaxiConan(ConanFile):\n name = \"cocoyaxi\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/idealvin/cocoyaxi\"\n license = \"MIT\"\n description = \"A go-style coroutine library in C++11 and more.\"\n topics = (\"cocoyaxi\", \"coroutine\", \"c++11\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_libcurl\": [True, False],\n \"with_openssl\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_libcurl\": False,\n \"with_openssl\": False,\n }\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n if self.options.with_libcurl:\n self.requires(\"libcurl/7.80.0\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.requires(\"openssl/1.1.1q\")\n\n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, 11)\n if self.info.options.with_libcurl:\n if not self.info.options.with_openssl:\n raise ConanInvalidConfiguration(f\"{self.name} requires with_openssl=True when using 
with_libcurl=True\")\n if self.dependencies[\"libcurl\"].options.with_ssl != \"openssl\":\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_ssl='openssl' to be enabled\")\n if not self.dependencies[\"libcurl\"].options.with_zlib:\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_zlib=True to be enabled\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n if is_msvc(self):\n tc.variables[\"STATIC_VS_CRT\"] = is_msvc_static_runtime(self)\n tc.variables[\"WITH_LIBCURL\"] = self.options.with_libcurl\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.generate()\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.md\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"cocoyaxi\")\n self.cpp_info.set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n # TODO: back to global scope in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].libs = [\"co\"]\n\n # TODO: to remove in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n if self.options.with_libcurl:\n self.cpp_info.components[\"co\"].requires.append(\"libcurl::libcurl\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.cpp_info.components[\"co\"].requires.append(\"openssl::openssl\")\n", "path": "recipes/cocoyaxi/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import copy, get\nfrom conan.tools.microsoft import is_msvc, is_msvc_static_runtime\nimport os\n\nrequired_conan_version = \">=1.50.0\"\n\n\nclass CocoyaxiConan(ConanFile):\n name = \"cocoyaxi\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/idealvin/cocoyaxi\"\n license = \"MIT\"\n description = \"A go-style coroutine library in C++11 and more.\"\n topics = (\"cocoyaxi\", \"coroutine\", \"c++11\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_libcurl\": [True, False],\n \"with_openssl\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_libcurl\": False,\n \"with_openssl\": False,\n }\n deprecated = \"coost\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def requirements(self):\n if self.options.with_libcurl:\n self.requires(\"libcurl/8.2.1\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.requires(\"openssl/[>=1.1 <4]\")\n\n def validate(self):\n if self.info.settings.compiler.cppstd:\n check_min_cppstd(self, 11)\n if self.info.options.with_libcurl:\n if not self.info.options.with_openssl:\n raise ConanInvalidConfiguration(f\"{self.name} requires with_openssl=True when using with_libcurl=True\")\n if 
self.dependencies[\"libcurl\"].options.with_ssl != \"openssl\":\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_ssl='openssl' to be enabled\")\n if not self.dependencies[\"libcurl\"].options.with_zlib:\n raise ConanInvalidConfiguration(f\"{self.name} requires libcurl:with_zlib=True to be enabled\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n if is_msvc(self):\n tc.variables[\"STATIC_VS_CRT\"] = is_msvc_static_runtime(self)\n tc.variables[\"WITH_LIBCURL\"] = self.options.with_libcurl\n tc.variables[\"WITH_OPENSSL\"] = self.options.with_openssl\n tc.generate()\n cd = CMakeDeps(self)\n cd.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.md\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"cocoyaxi\")\n self.cpp_info.set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n # TODO: back to global scope in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].libs = [\"co\"]\n\n # TODO: to remove in conan v2 once legacy generators removed\n self.cpp_info.components[\"co\"].set_property(\"cmake_target_name\", \"cocoyaxi::co\")\n if self.options.with_libcurl:\n self.cpp_info.components[\"co\"].requires.append(\"libcurl::libcurl\")\n if self.options.with_libcurl or self.options.with_openssl:\n self.cpp_info.components[\"co\"].requires.append(\"openssl::openssl\")\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.components[\"co\"].system_libs.extend([\"pthread\", \"dl\", \"m\"])\n elif self.settings.os == \"Windows\":\n self.cpp_info.components[\"co\"].system_libs.append(\"ws2_32\")\n", "path": "recipes/cocoyaxi/all/conanfile.py"}]} | 1,504 | 385 |
gh_patches_debug_11424 | rasdani/github-patches | git_diff | cobbler__cobbler-3552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SafeConfigParser was removed in 3.12
### Describe the bug
From https://docs.python.org/3.12/whatsnew/3.12.html:
Several names deprecated in the configparser way back in 3.2 have been removed per gh-89336:
- configparser.ParsingError no longer has a filename attribute or argument. Use the source attribute and argument instead.
- configparser no longer has a SafeConfigParser class. Use the shorter ConfigParser name instead.
- configparser.ConfigParser no longer has a readfp method. Use read_file() instead.
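For code that only needs basic section and option reading, the migration suggested by the second bullet is mechanical; a minimal sketch, assuming no SafeConfigParser-specific behaviour is relied on:

```python
from configparser import ConfigParser  # SafeConfigParser no longer exists in 3.12

config = ConfigParser()  # was: SafeConfigParser()
config.read("/etc/cobbler/users.conf")
for group in config.sections():
    print(group, config.options(group))
```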
<!--- HINT: You can paste gist.github.com links for long logs or larger files -->
### Steps to reproduce
1. systemctl start cobblerd
### Expected behavior
<!--- A clear and concise description of what you expected to happen. -->
### Cobbler version
<!--- Paste output from `cobbler version` -->
````paste below
Cobbler 3.4.0
````
### Operating system
Fedora 39
### Cobbler log
<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->
````paste below
cobblerd[1614]: INFO | Exception raised when loading module authorization.configfile
cobblerd[1614]: INFO | Exception occurred: <class 'ImportError'>
cobblerd[1614]: INFO | Exception value: cannot import name 'SafeConfigParser' from 'configparser' (/usr/lib64/python3.12/configparser.py)
````
### Screenshots
<!--- If applicable, add screenshots to help explain your problem. -->
### Additional information
<!--- Add any other context about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/modules/authorization/configfile.py`
Content:
```
1 """
2 Authorization module that allow users listed in
3 /etc/cobbler/users.conf to be permitted to access resources.
4 For instance, when using authz_ldap, you want to use authn_configfile,
5 not authz_allowall, which will most likely NOT do what you want.
6 """
7 # SPDX-License-Identifier: GPL-2.0-or-later
8 # SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others
9 # SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
10
11
12 import os
13 from configparser import SafeConfigParser
14 from typing import TYPE_CHECKING, Any, Dict
15
16 if TYPE_CHECKING:
17 from cobbler.api import CobblerAPI
18
19
20 CONFIG_FILE = "/etc/cobbler/users.conf"
21
22
23 def register() -> str:
24 """
25 The mandatory Cobbler module registration hook.
26
27 :return: Always "authz".
28 """
29 return "authz"
30
31
32 def __parse_config() -> Dict[str, Dict[Any, Any]]:
33 """
34 Parse the the users.conf file.
35
36 :return: The data of the config file.
37 """
38 if not os.path.exists(CONFIG_FILE):
39 return {}
40 config = SafeConfigParser()
41 config.read(CONFIG_FILE)
42 alldata: Dict[str, Dict[str, Any]] = {}
43 groups = config.sections()
44 for group in groups:
45 alldata[str(group)] = {}
46 options = config.options(group)
47 for option in options:
48 alldata[group][option] = 1
49 return alldata
50
51
52 def authorize(
53 api_handle: "CobblerAPI",
54 user: str,
55 resource: str,
56 arg1: Any = None,
57 arg2: Any = None,
58 ) -> int:
59 """
60 Validate a user against a resource. All users in the file are permitted by this module.
61
62 :param api_handle: This parameter is not used currently.
63 :param user: The user to authorize.
64 :param resource: This parameter is not used currently.
65 :param arg1: This parameter is not used currently.
66 :param arg2: This parameter is not used currently.
67 :return: "0" if no authorized, "1" if authorized.
68 """
69 # FIXME: this must be modified to use the new ACL engine
70
71 data = __parse_config()
72 for _, group_data in data.items():
73 if user.lower() in group_data:
74 return 1
75 return 0
76
```
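For context, `__parse_config()` above flattens the INI-style users.conf into a dict keyed by group name, with every option mapped to 1. A small self-contained illustration of that shape (the group and user names are made up):

```python
from configparser import ConfigParser

sample = """\
[admins]
admin = ""
cobbler = ""
"""

config = ConfigParser()
config.read_string(sample)
alldata = {group: {option: 1 for option in config.options(group)} for group in config.sections()}
print(alldata)  # {'admins': {'admin': 1, 'cobbler': 1}}
```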
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py
--- a/cobbler/modules/authorization/configfile.py
+++ b/cobbler/modules/authorization/configfile.py
@@ -10,7 +10,7 @@
import os
-from configparser import SafeConfigParser
+from configparser import ConfigParser
from typing import TYPE_CHECKING, Any, Dict
if TYPE_CHECKING:
@@ -37,7 +37,7 @@
"""
if not os.path.exists(CONFIG_FILE):
return {}
- config = SafeConfigParser()
+ config = ConfigParser()
config.read(CONFIG_FILE)
alldata: Dict[str, Dict[str, Any]] = {}
groups = config.sections()
| {"golden_diff": "diff --git a/cobbler/modules/authorization/configfile.py b/cobbler/modules/authorization/configfile.py\n--- a/cobbler/modules/authorization/configfile.py\n+++ b/cobbler/modules/authorization/configfile.py\n@@ -10,7 +10,7 @@\n \n \n import os\n-from configparser import SafeConfigParser\n+from configparser import ConfigParser\n from typing import TYPE_CHECKING, Any, Dict\n \n if TYPE_CHECKING:\n@@ -37,7 +37,7 @@\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n- config = SafeConfigParser()\n+ config = ConfigParser()\n config.read(CONFIG_FILE)\n alldata: Dict[str, Dict[str, Any]] = {}\n groups = config.sections()\n", "issue": "SafeConfigParser was removed in 3.12\n### Describe the bug\r\n\r\nFrom https://docs.python.org/3.12/whatsnew/3.12.html:\r\n\r\nSeveral names deprecated in the configparser way back in 3.2 have been removed per gh-89336:\r\n- configparser.ParsingError no longer has a filename attribute or argument. Use the source attribute and argument instead.\r\n- configparser no longer has a SafeConfigParser class. Use the shorter ConfigParser name instead.\r\n- configparser.ConfigParser no longer has a readfp method. Use read_file() instead.\r\n\r\n\r\n<!--- HINT: You can paste gist.github.com links for long logs or larger files -->\r\n\r\n### Steps to reproduce\r\n\r\n1. systemctl start cobblerd\r\n\r\n### Expected behavior\r\n\r\n<!--- A clear and concise description of what you expected to happen. -->\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\nCobbler 3.4.0\r\n````\r\n\r\n### Operating system\r\n\r\nFedora 39\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\ncobblerd[1614]: INFO | Exception raised when loading module authorization.configfile\r\ncobblerd[1614]: INFO | Exception occurred: <class 'ImportError'>\r\ncobblerd[1614]: INFO | Exception value: cannot import name 'SafeConfigParser' from 'configparser' (/usr/lib64/python3.12/configparser.py)\r\n````\r\n\r\n### Screenshots\r\n\r\n<!--- If applicable, add screenshots to help explain your problem. -->\r\n\r\n### Additional information\r\n\r\n<!--- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"\nAuthorization module that allow users listed in\n/etc/cobbler/users.conf to be permitted to access resources.\nFor instance, when using authz_ldap, you want to use authn_configfile,\nnot authz_allowall, which will most likely NOT do what you want.\n\"\"\"\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\n\nimport os\nfrom configparser import SafeConfigParser\nfrom typing import TYPE_CHECKING, Any, Dict\n\nif TYPE_CHECKING:\n from cobbler.api import CobblerAPI\n\n\nCONFIG_FILE = \"/etc/cobbler/users.conf\"\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n\n :return: Always \"authz\".\n \"\"\"\n return \"authz\"\n\n\ndef __parse_config() -> Dict[str, Dict[Any, Any]]:\n \"\"\"\n Parse the the users.conf file.\n\n :return: The data of the config file.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n config = SafeConfigParser()\n config.read(CONFIG_FILE)\n alldata: Dict[str, Dict[str, Any]] = {}\n groups = config.sections()\n for group in groups:\n alldata[str(group)] = {}\n options = config.options(group)\n for option in options:\n alldata[group][option] = 1\n return alldata\n\n\ndef authorize(\n api_handle: \"CobblerAPI\",\n user: str,\n resource: str,\n arg1: Any = None,\n arg2: Any = None,\n) -> int:\n \"\"\"\n Validate a user against a resource. All users in the file are permitted by this module.\n\n :param api_handle: This parameter is not used currently.\n :param user: The user to authorize.\n :param resource: This parameter is not used currently.\n :param arg1: This parameter is not used currently.\n :param arg2: This parameter is not used currently.\n :return: \"0\" if no authorized, \"1\" if authorized.\n \"\"\"\n # FIXME: this must be modified to use the new ACL engine\n\n data = __parse_config()\n for _, group_data in data.items():\n if user.lower() in group_data:\n return 1\n return 0\n", "path": "cobbler/modules/authorization/configfile.py"}], "after_files": [{"content": "\"\"\"\nAuthorization module that allow users listed in\n/etc/cobbler/users.conf to be permitted to access resources.\nFor instance, when using authz_ldap, you want to use authn_configfile,\nnot authz_allowall, which will most likely NOT do what you want.\n\"\"\"\n# SPDX-License-Identifier: GPL-2.0-or-later\n# SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others\n# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>\n\n\nimport os\nfrom configparser import ConfigParser\nfrom typing import TYPE_CHECKING, Any, Dict\n\nif TYPE_CHECKING:\n from cobbler.api import CobblerAPI\n\n\nCONFIG_FILE = \"/etc/cobbler/users.conf\"\n\n\ndef register() -> str:\n \"\"\"\n The mandatory Cobbler module registration hook.\n\n :return: Always \"authz\".\n \"\"\"\n return \"authz\"\n\n\ndef __parse_config() -> Dict[str, Dict[Any, Any]]:\n \"\"\"\n Parse the the users.conf file.\n\n :return: The data of the config file.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n config = ConfigParser()\n config.read(CONFIG_FILE)\n alldata: Dict[str, Dict[str, Any]] = {}\n groups = config.sections()\n for group in groups:\n alldata[str(group)] = {}\n options = config.options(group)\n for option in options:\n alldata[group][option] = 1\n return alldata\n\n\ndef authorize(\n api_handle: \"CobblerAPI\",\n user: str,\n resource: str,\n arg1: Any = None,\n arg2: Any = None,\n) 
-> int:\n \"\"\"\n Validate a user against a resource. All users in the file are permitted by this module.\n\n :param api_handle: This parameter is not used currently.\n :param user: The user to authorize.\n :param resource: This parameter is not used currently.\n :param arg1: This parameter is not used currently.\n :param arg2: This parameter is not used currently.\n :return: \"0\" if no authorized, \"1\" if authorized.\n \"\"\"\n # FIXME: this must be modified to use the new ACL engine\n\n data = __parse_config()\n for _, group_data in data.items():\n if user.lower() in group_data:\n return 1\n return 0\n", "path": "cobbler/modules/authorization/configfile.py"}]} | 1,315 | 161 |
gh_patches_debug_19861 | rasdani/github-patches | git_diff | doccano__doccano-1958 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug report] Static files are not copied on pip installation
How to reproduce the behaviour
---------
It seems that at least some static files are not copied on a pip installation.
For instance, `http://site.com/favicon.ico` is available on a Docker Compose installation, but it is 404'd on a pip installation.
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: CentOS 8.3
* Python Version Used: 3.8.8
* When you install doccano: v1.2.1
* How did you install doccano (Heroku button etc): pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/config/urls.py`
Content:
```
1 """app URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/2.0/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 import os
17 import re
18
19 from django.conf import settings
20 from django.contrib import admin
21 from django.contrib.auth.views import TemplateView
22 from django.urls import include, path, re_path
23 from django.views.static import serve
24 from drf_yasg import openapi
25 from drf_yasg.views import get_schema_view
26
27 schema_view = get_schema_view(
28 openapi.Info(
29 title="doccano API",
30 default_version="v1",
31 description="doccano API description",
32 license=openapi.License(name="MIT License"),
33 ),
34 public=True,
35 )
36
37 urlpatterns = []
38 if settings.DEBUG or os.environ.get("STANDALONE", False):
39 # For showing images and audios in the case of pip and Docker.
40 urlpatterns.append(
41 re_path(
42 r"^%s(?P<path>.*)$" % re.escape(settings.MEDIA_URL.lstrip("/")),
43 serve,
44 {"document_root": settings.MEDIA_ROOT},
45 )
46 )
47
48 urlpatterns += [
49 path("admin/", admin.site.urls),
50 path("api-auth/", include("rest_framework.urls")),
51 path("v1/health/", include("health_check.urls")),
52 path("v1/", include("api.urls")),
53 path("v1/", include("roles.urls")),
54 path("v1/", include("users.urls")),
55 path("v1/", include("data_import.urls")),
56 path("v1/", include("data_export.urls")),
57 path("v1/", include("projects.urls")),
58 path("v1/projects/<int:project_id>/metrics/", include("metrics.urls")),
59 path("v1/projects/<int:project_id>/", include("auto_labeling.urls")),
60 path("v1/projects/<int:project_id>/", include("examples.urls")),
61 path("v1/projects/<int:project_id>/", include("labels.urls")),
62 path("v1/projects/<int:project_id>/", include("label_types.urls")),
63 path("swagger/", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
64 re_path("", TemplateView.as_view(template_name="index.html")),
65 ]
66
```
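Note that in the standalone case above only `MEDIA_URL` paths are routed to `django.views.static.serve`, so `favicon.ico` has no dedicated route. `serve` can also pin a single file when both `document_root` and `path` are supplied as extra kwargs, which is the mechanism the fix below relies on (the directory location is an assumption about where the built client assets live):

```python
# Sketch of a favicon route for the urlpatterns above (hypothetical static_dir)
from pathlib import Path
from django.urls import path
from django.views.static import serve

static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"
urlpatterns.append(path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}))
```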
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/config/urls.py b/backend/config/urls.py
--- a/backend/config/urls.py
+++ b/backend/config/urls.py
@@ -15,6 +15,7 @@
"""
import os
import re
+from pathlib import Path
from django.conf import settings
from django.contrib import admin
@@ -36,6 +37,7 @@
urlpatterns = []
if settings.DEBUG or os.environ.get("STANDALONE", False):
+ static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"
# For showing images and audios in the case of pip and Docker.
urlpatterns.append(
re_path(
@@ -44,6 +46,8 @@
{"document_root": settings.MEDIA_ROOT},
)
)
+ # For showing favicon on the case of pip and Docker.
+ urlpatterns.append(path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}))
urlpatterns += [
path("admin/", admin.site.urls),
| {"golden_diff": "diff --git a/backend/config/urls.py b/backend/config/urls.py\n--- a/backend/config/urls.py\n+++ b/backend/config/urls.py\n@@ -15,6 +15,7 @@\n \"\"\"\n import os\n import re\n+from pathlib import Path\n \n from django.conf import settings\n from django.contrib import admin\n@@ -36,6 +37,7 @@\n \n urlpatterns = []\n if settings.DEBUG or os.environ.get(\"STANDALONE\", False):\n+ static_dir = Path(__file__).resolve().parent.parent / \"client\" / \"dist\"\n # For showing images and audios in the case of pip and Docker.\n urlpatterns.append(\n re_path(\n@@ -44,6 +46,8 @@\n {\"document_root\": settings.MEDIA_ROOT},\n )\n )\n+ # For showing favicon on the case of pip and Docker.\n+ urlpatterns.append(path(\"favicon.ico\", serve, {\"document_root\": static_dir, \"path\": \"favicon.ico\"}))\n \n urlpatterns += [\n path(\"admin/\", admin.site.urls),\n", "issue": "[Bug report] Static files are not copied on pip installation\nHow to reproduce the behaviour\r\n---------\r\nSeems like (some?) static files are not copied on pip installation.\r\nFor instance `http://site.com/favicon.ico` is available on Docker Compose installation. But it is 404'd on pip installation.\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: CentOS 8.3\r\n* Python Version Used: 3.8.8\r\n* When you install doccano: v1.2.1\r\n* How did you install doccano (Heroku button etc): pip\r\n\n", "before_files": [{"content": "\"\"\"app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport os\nimport re\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.auth.views import TemplateView\nfrom django.urls import include, path, re_path\nfrom django.views.static import serve\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"doccano API\",\n default_version=\"v1\",\n description=\"doccano API description\",\n license=openapi.License(name=\"MIT License\"),\n ),\n public=True,\n)\n\nurlpatterns = []\nif settings.DEBUG or os.environ.get(\"STANDALONE\", False):\n # For showing images and audios in the case of pip and Docker.\n urlpatterns.append(\n re_path(\n r\"^%s(?P<path>.*)$\" % re.escape(settings.MEDIA_URL.lstrip(\"/\")),\n serve,\n {\"document_root\": settings.MEDIA_ROOT},\n )\n )\n\nurlpatterns += [\n path(\"admin/\", admin.site.urls),\n path(\"api-auth/\", include(\"rest_framework.urls\")),\n path(\"v1/health/\", include(\"health_check.urls\")),\n path(\"v1/\", include(\"api.urls\")),\n path(\"v1/\", include(\"roles.urls\")),\n path(\"v1/\", include(\"users.urls\")),\n path(\"v1/\", include(\"data_import.urls\")),\n path(\"v1/\", include(\"data_export.urls\")),\n path(\"v1/\", include(\"projects.urls\")),\n path(\"v1/projects/<int:project_id>/metrics/\", include(\"metrics.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"auto_labeling.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"examples.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"labels.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"label_types.urls\")),\n path(\"swagger/\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"schema-swagger-ui\"),\n re_path(\"\", TemplateView.as_view(template_name=\"index.html\")),\n]\n", "path": "backend/config/urls.py"}], "after_files": [{"content": "\"\"\"app URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nimport os\nimport re\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.auth.views import TemplateView\nfrom django.urls import include, path, re_path\nfrom django.views.static import serve\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"doccano API\",\n default_version=\"v1\",\n description=\"doccano API description\",\n license=openapi.License(name=\"MIT License\"),\n ),\n public=True,\n)\n\nurlpatterns = []\nif settings.DEBUG or os.environ.get(\"STANDALONE\", False):\n static_dir = Path(__file__).resolve().parent.parent / \"client\" / \"dist\"\n # For showing images and audios in the case of pip and Docker.\n urlpatterns.append(\n re_path(\n r\"^%s(?P<path>.*)$\" % re.escape(settings.MEDIA_URL.lstrip(\"/\")),\n serve,\n {\"document_root\": settings.MEDIA_ROOT},\n )\n )\n # For showing favicon on the case of pip and Docker.\n urlpatterns.append(path(\"favicon.ico\", serve, {\"document_root\": static_dir, \"path\": \"favicon.ico\"}))\n\nurlpatterns += [\n path(\"admin/\", admin.site.urls),\n path(\"api-auth/\", include(\"rest_framework.urls\")),\n path(\"v1/health/\", include(\"health_check.urls\")),\n path(\"v1/\", include(\"api.urls\")),\n path(\"v1/\", include(\"roles.urls\")),\n path(\"v1/\", include(\"users.urls\")),\n path(\"v1/\", include(\"data_import.urls\")),\n path(\"v1/\", include(\"data_export.urls\")),\n path(\"v1/\", include(\"projects.urls\")),\n path(\"v1/projects/<int:project_id>/metrics/\", include(\"metrics.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"auto_labeling.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"examples.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"labels.urls\")),\n path(\"v1/projects/<int:project_id>/\", include(\"label_types.urls\")),\n path(\"swagger/\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"schema-swagger-ui\"),\n re_path(\"\", TemplateView.as_view(template_name=\"index.html\")),\n]\n", "path": "backend/config/urls.py"}]} | 1,121 | 223 |
gh_patches_debug_5649 | rasdani/github-patches | git_diff | vaexio__vaex-1268 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEATURE-REQUEST] Read files from ADLS Gen 2 (Cloud support for Azure Data Lake)
Hello,
in addition to support for S3 and GCS, it'd be awesome if it'd also be possible to read files directly from ADLS Gen 2, since some of us are restricted in the choice of cloud platforms we can use.
Thanks,
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/file/__init__.py`
Content:
```
1 __author__ = 'breddels'
2 import pathlib
3 import logging
4 from glob import glob as local_glob
5 import io
6 import os
7 import re
8 import sys
9 from urllib.parse import parse_qs
10 import warnings
11 import pkg_resources
12
13 import pyarrow as pa
14 import pyarrow.fs
15
16 import vaex.file.cache
17
18
19 normal_open = open
20 logger = logging.getLogger("vaex.file")
21
22
23 class FileProxy:
24 '''Wraps a file object, giving it a name a dup() method
25
26 The dup is needed since a file is stateful, and needs to be duplicated in threads
27 '''
28 def __init__(self, file, name, dup):
29 self.file = file
30 self.name = name
31 self.dup = dup
32 self.closed = False
33
34 def __iter__(self):
35 raise NotImplementedError('This is just for looking like a file object to Pandas')
36
37 def write(self, *args):
38 return self.file.write(*args)
39
40 def read(self, *args):
41 return self.file.read(*args)
42
43 def seek(self, *args):
44 return self.file.seek(*args)
45
46 def readinto(self, *args):
47 return self.file.readinto(*args)
48
49 def tell(self):
50 return self.file.tell()
51
52 def close(self):
53 self.closed = True
54 return self.file.close()
55
56 def __enter__(self, *args):
57 return self
58
59 def __exit__(self, *args):
60 self.file.close()
61
62
63 def is_file_object(file):
64 return hasattr(file, 'read') and hasattr(file, 'seek')
65
66
67 def file_and_path(file, mode='r', fs_options={}, fs=None):
68 if is_file_object(file):
69 return file, stringyfy(file)
70 else:
71 file = open(file, mode=mode, fs_options=fs_options, fs=fs)
72 return file, stringyfy(file)
73
74
75 def is_path_like(path):
76 try:
77 stringyfy(path)
78 return True
79 except ValueError:
80 return False
81
82
83 def stringyfy(path):
84 """Get string from path like object of file like object
85
86 >>> import sys, pytest
87 >>> if sys.platform.startswith('win'):
88 ... pytest.skip('this doctest does not work on Windows')
89 ...
90 >>> stringyfy('/tmp/test')
91 '/tmp/test'
92 >>> from pathlib import Path
93 >>> stringyfy(Path('/tmp/test'))
94 '/tmp/test'
95 """
96 try:
97 # Pathlib support
98 path = path.__fspath__()
99 except AttributeError:
100 pass
101 if hasattr(path, 'name'): # passed in a file
102 path = path.name
103 if isinstance(path, str):
104 return path
105 raise ValueError(f'Cannot convert {path} to a path')
106
107
108 def split_scheme(path):
109 path = stringyfy(path)
110 if '://' in path:
111 scheme, path = path[:path.index('://')], path[path.index('://')+3:]
112 else:
113 scheme = None
114 return scheme, path
115
116
117 def memory_mappable(path):
118 path = stringyfy(path)
119 scheme, _ = split_scheme(path)
120 return scheme is None
121
122
123 def split_options(path, fs_options={}):
124 if isinstance(path, list):
125 paths = []
126 previous_options = None
127 for path in path:
128 path, options = split_options(path, fs_options)
129 if previous_options is not None:
130 if previous_options != options:
131 raise ValueError(f'Inconsistent set of fs_options given: {previous_options} {options}')
132 else:
133 previous_options = options
134 paths.append(path)
135 return paths, previous_options
136 path = stringyfy(path)
137 match = re.match(r'(.*?)\?((&?[^=&?]+=[^=&?]+)+)', path)
138 if match:
139 naked_path, query = match.groups()[:2]
140 else:
141 naked_path = path
142 query = ''
143 options = fs_options.copy()
144 options.update({key: values[0] for key, values in parse_qs(query).items()})
145 return naked_path, options
146
147
148 def split_ext(path, fs_options={}):
149 path, fs_options = split_options(path, fs_options=fs_options)
150 base, ext = os.path.splitext(path)
151 return base, ext, fs_options
152
153
154 def exists(path, fs_options={}, fs=None):
155 """Checks if file exists.
156
157 >>> vaex.file.exists('/you/do/not')
158 False
159
160 >>> vaex.file.exists('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True})
161 True
162 """
163 fs, path = parse(path, fs_options=fs_options, fs=fs)
164 if fs is None:
165 return os.path.exists(path)
166 else:
167 return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound
168
169
170 def _get_scheme_handler(path):
171 scheme, _ = split_scheme(path)
172 for entry in pkg_resources.iter_entry_points(group='vaex.file.scheme'):
173 if entry.name == scheme:
174 return entry.load()
175 raise ValueError(f'Do not know how to open {path}, no handler for {scheme} is known')
176
177
178 def remove(path):
179 scheme, path = split_scheme(path)
180 if scheme:
181 raise ValueError('Cannot delete non-local files yet')
182 os.remove(path)
183
184
185 def parse(path, fs_options={}, fs=None, for_arrow=False):
186 if fs is not None:
187 if fs_options:
188 warnings.warn(f'Passed fs_options while fs was specified, {fs_options} are ignored')
189 if 'fsspec' in sys.modules:
190 import fsspec
191 if isinstance(fs, fsspec.AbstractFileSystem):
192 fs = pa.fs.FSSpecHandler(fs)
193 return fs, path
194 if isinstance(path, (list, tuple)):
195 scheme, _ = split_scheme(path[0])
196 else:
197 scheme, _ = split_scheme(path)
198 if not scheme:
199 return None, path
200 if isinstance(path, (list, tuple)):
201 module = _get_scheme_handler(path[0])
202 return module.parse(path[0], fs_options, for_arrow=for_arrow)[0], path
203 else:
204 module = _get_scheme_handler(path)
205 return module.parse(path, fs_options, for_arrow=for_arrow)
206
207
208 def create_dir(path, fs_options, fs=None):
209 fs, path = parse(path, fs_options=fs_options, fs=fs)
210 if fs is None:
211 fs = pa.fs.LocalFileSystem()
212 fs.create_dir(path, recursive=True)
213
214
215 def fingerprint(path, fs_options={}, fs=None):
216 """Deterministic fingerprint for a file, useful in combination with dask or detecting file changes.
217
218 Based on mtime (modification time), file size, and the path. May lead to
219 false negative if the path changes, but not the content.
220
221 >>> fingerprint('/data/taxi.parquet') # doctest: +SKIP
222 '0171ec50cb2cf71b8e4f813212063a19'
223
224 >>> fingerprint('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True}) # doctest: +SKIP
225 '7c962e2d8c21b6a3681afb682d3bf91b'
226 """
227 fs, path = parse(path, fs_options, fs=fs)
228 path = stringyfy(path)
229 if fs is None:
230 mtime = os.path.getmtime(path)
231 size = os.path.getsize(path)
232 else:
233 info = fs.get_file_info([path])[0]
234 mtime = info.mtime_ns
235 size = info.size
236 import vaex.cache
237 return vaex.cache.fingerprint(('file', (path, mtime, size)))
238
239
240 def open(path, mode='rb', fs_options={}, fs=None, for_arrow=False, mmap=False, encoding="utf8"):
241 if is_file_object(path):
242 return path
243 fs, path = parse(path, fs_options=fs_options, fs=fs, for_arrow=for_arrow)
244 if fs is None:
245 path = stringyfy(path)
246 if for_arrow:
247 if fs_options:
248 raise ValueError(f'fs_options not supported for local files. You passed: {repr(fs_options)}.')
249 if mmap:
250 return pa.memory_map(path, mode)
251 else:
252 return pa.OSFile(path, mode)
253 else:
254 if 'b' not in mode:
255 return normal_open(path, mode, encoding=encoding)
256 else:
257 return normal_open(path, mode)
258 if mode == 'rb':
259 def create():
260 return fs.open_input_file(path)
261 elif mode == "r":
262 def create():
263 return io.TextIOWrapper(fs.open_input_file(path), encoding=encoding)
264 elif mode == 'wb':
265 def create():
266 return fs.open_output_stream(path)
267 elif mode == "w":
268 def create():
269 return io.TextIOWrapper(fs.open_output_stream(path), encoding=encoding)
270 else:
271 raise ValueError(f'Only mode=rb/bw/r/w are supported, not {mode}')
272 return FileProxy(create(), path, create)
273
274
275 def dup(file):
276 """Duplicate a file like object, s3 or cached file supported"""
277 if isinstance(file, (vaex.file.cache.CachedFile, FileProxy)):
278 return file.dup()
279 else:
280 return normal_open(file.name, file.mode)
281
282 def glob(path, fs_options={}, fs=None):
283 if fs:
284 raise ValueError('globbing with custom fs not supported yet, please open an issue.')
285 scheme, _ = split_scheme(path)
286 if not scheme:
287 return local_glob(path)
288 module = _get_scheme_handler(path)
289 return module.glob(path, fs_options)
290
291
292 def ext(path):
293 path = stringyfy(path)
294 path, options = split_options(path)
295 return os.path.splitext(path)[1]
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/file/__init__.py b/packages/vaex-core/vaex/file/__init__.py
--- a/packages/vaex-core/vaex/file/__init__.py
+++ b/packages/vaex-core/vaex/file/__init__.py
@@ -190,6 +190,8 @@
import fsspec
if isinstance(fs, fsspec.AbstractFileSystem):
fs = pa.fs.FSSpecHandler(fs)
+ if for_arrow:
+ fs = pyarrow.fs.PyFileSystem(fs)
return fs, path
if isinstance(path, (list, tuple)):
scheme, _ = split_scheme(path[0])
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/file/__init__.py b/packages/vaex-core/vaex/file/__init__.py\n--- a/packages/vaex-core/vaex/file/__init__.py\n+++ b/packages/vaex-core/vaex/file/__init__.py\n@@ -190,6 +190,8 @@\n import fsspec\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs = pa.fs.FSSpecHandler(fs)\n+ if for_arrow:\n+ fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n if isinstance(path, (list, tuple)):\n scheme, _ = split_scheme(path[0])\n", "issue": "[FEATURE-REQUEST] Read files from ADLS Gen 2 (Cloud support for Azure Data Lake)\nHello,\r\n\r\nin addition to support for S3 and GCS, it'd be awesome if it'd also be possible to read files directly from ADLS Gen 2, since some of us are restricted in the choice of cloud platforms we can use.\r\n\r\nThanks,\n", "before_files": [{"content": "__author__ = 'breddels'\nimport pathlib\nimport logging\nfrom glob import glob as local_glob\nimport io\nimport os\nimport re\nimport sys\nfrom urllib.parse import parse_qs\nimport warnings\nimport pkg_resources\n\nimport pyarrow as pa\nimport pyarrow.fs\n\nimport vaex.file.cache\n\n\nnormal_open = open\nlogger = logging.getLogger(\"vaex.file\")\n\n\nclass FileProxy:\n '''Wraps a file object, giving it a name a dup() method\n\n The dup is needed since a file is stateful, and needs to be duplicated in threads\n '''\n def __init__(self, file, name, dup):\n self.file = file\n self.name = name\n self.dup = dup\n self.closed = False\n\n def __iter__(self):\n raise NotImplementedError('This is just for looking like a file object to Pandas')\n\n def write(self, *args):\n return self.file.write(*args)\n\n def read(self, *args):\n return self.file.read(*args)\n\n def seek(self, *args):\n return self.file.seek(*args)\n\n def readinto(self, *args):\n return self.file.readinto(*args)\n\n def tell(self):\n return self.file.tell()\n\n def close(self):\n self.closed = True\n return self.file.close()\n\n def __enter__(self, *args):\n return self\n\n def __exit__(self, *args):\n self.file.close()\n\n\ndef is_file_object(file):\n return hasattr(file, 'read') and hasattr(file, 'seek')\n\n\ndef file_and_path(file, mode='r', fs_options={}, fs=None):\n if is_file_object(file):\n return file, stringyfy(file)\n else:\n file = open(file, mode=mode, fs_options=fs_options, fs=fs)\n return file, stringyfy(file)\n\n\ndef is_path_like(path):\n try:\n stringyfy(path)\n return True\n except ValueError:\n return False\n\n\ndef stringyfy(path):\n \"\"\"Get string from path like object of file like object\n\n >>> import sys, pytest\n >>> if sys.platform.startswith('win'):\n ... 
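With the FSSpecHandler branch shown in `parse()` and the `PyFileSystem` wrapping added by the patch, an fsspec filesystem can be handed to vaex directly, which is how Azure Data Lake access could work via the adlfs package. A usage sketch; the adlfs dependency, the account details, and the assumption that the top-level `vaex.open` forwards `fs` in the same way as the helper above are all hypothetical:

```python
import fsspec  # the "abfs" protocol requires the adlfs package to be installed
import vaex

fs = fsspec.filesystem("abfs", account_name="myaccount", account_key="<storage-key>")
df = vaex.open("abfs://container/path/data.parquet", fs=fs)
```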
pytest.skip('this doctest does not work on Windows')\n ...\n >>> stringyfy('/tmp/test')\n '/tmp/test'\n >>> from pathlib import Path\n >>> stringyfy(Path('/tmp/test'))\n '/tmp/test'\n \"\"\"\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')\n\n\ndef split_scheme(path):\n path = stringyfy(path)\n if '://' in path:\n scheme, path = path[:path.index('://')], path[path.index('://')+3:]\n else:\n scheme = None\n return scheme, path\n\n\ndef memory_mappable(path):\n path = stringyfy(path)\n scheme, _ = split_scheme(path)\n return scheme is None\n\n\ndef split_options(path, fs_options={}):\n if isinstance(path, list):\n paths = []\n previous_options = None\n for path in path:\n path, options = split_options(path, fs_options)\n if previous_options is not None:\n if previous_options != options:\n raise ValueError(f'Inconsistent set of fs_options given: {previous_options} {options}')\n else:\n previous_options = options\n paths.append(path)\n return paths, previous_options\n path = stringyfy(path)\n match = re.match(r'(.*?)\\?((&?[^=&?]+=[^=&?]+)+)', path)\n if match:\n naked_path, query = match.groups()[:2]\n else:\n naked_path = path\n query = ''\n options = fs_options.copy()\n options.update({key: values[0] for key, values in parse_qs(query).items()})\n return naked_path, options\n\n\ndef split_ext(path, fs_options={}):\n path, fs_options = split_options(path, fs_options=fs_options)\n base, ext = os.path.splitext(path)\n return base, ext, fs_options\n\n\ndef exists(path, fs_options={}, fs=None):\n \"\"\"Checks if file exists.\n\n >>> vaex.file.exists('/you/do/not')\n False\n\n >>> vaex.file.exists('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True})\n True\n \"\"\"\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n return os.path.exists(path)\n else:\n return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound\n\n\ndef _get_scheme_handler(path):\n scheme, _ = split_scheme(path)\n for entry in pkg_resources.iter_entry_points(group='vaex.file.scheme'):\n if entry.name == scheme:\n return entry.load()\n raise ValueError(f'Do not know how to open {path}, no handler for {scheme} is known')\n\n\ndef remove(path):\n scheme, path = split_scheme(path)\n if scheme:\n raise ValueError('Cannot delete non-local files yet')\n os.remove(path)\n\n\ndef parse(path, fs_options={}, fs=None, for_arrow=False):\n if fs is not None:\n if fs_options:\n warnings.warn(f'Passed fs_options while fs was specified, {fs_options} are ignored')\n if 'fsspec' in sys.modules:\n import fsspec\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs = pa.fs.FSSpecHandler(fs)\n return fs, path\n if isinstance(path, (list, tuple)):\n scheme, _ = split_scheme(path[0])\n else:\n scheme, _ = split_scheme(path)\n if not scheme:\n return None, path\n if isinstance(path, (list, tuple)):\n module = _get_scheme_handler(path[0])\n return module.parse(path[0], fs_options, for_arrow=for_arrow)[0], path\n else:\n module = _get_scheme_handler(path)\n return module.parse(path, fs_options, for_arrow=for_arrow)\n\n\ndef create_dir(path, fs_options, fs=None):\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n fs = pa.fs.LocalFileSystem()\n fs.create_dir(path, recursive=True)\n\n\ndef fingerprint(path, fs_options={}, fs=None):\n \"\"\"Deterministic fingerprint for a file, useful in 
combination with dask or detecting file changes.\n\n Based on mtime (modification time), file size, and the path. May lead to\n false negative if the path changes, but not the content.\n\n >>> fingerprint('/data/taxi.parquet') # doctest: +SKIP\n '0171ec50cb2cf71b8e4f813212063a19'\n\n >>> fingerprint('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True}) # doctest: +SKIP\n '7c962e2d8c21b6a3681afb682d3bf91b'\n \"\"\"\n fs, path = parse(path, fs_options, fs=fs)\n path = stringyfy(path)\n if fs is None:\n mtime = os.path.getmtime(path)\n size = os.path.getsize(path)\n else:\n info = fs.get_file_info([path])[0]\n mtime = info.mtime_ns\n size = info.size\n import vaex.cache\n return vaex.cache.fingerprint(('file', (path, mtime, size)))\n\n\ndef open(path, mode='rb', fs_options={}, fs=None, for_arrow=False, mmap=False, encoding=\"utf8\"):\n if is_file_object(path):\n return path\n fs, path = parse(path, fs_options=fs_options, fs=fs, for_arrow=for_arrow)\n if fs is None:\n path = stringyfy(path)\n if for_arrow:\n if fs_options:\n raise ValueError(f'fs_options not supported for local files. You passed: {repr(fs_options)}.')\n if mmap:\n return pa.memory_map(path, mode)\n else:\n return pa.OSFile(path, mode)\n else:\n if 'b' not in mode:\n return normal_open(path, mode, encoding=encoding)\n else:\n return normal_open(path, mode)\n if mode == 'rb':\n def create():\n return fs.open_input_file(path)\n elif mode == \"r\":\n def create():\n return io.TextIOWrapper(fs.open_input_file(path), encoding=encoding)\n elif mode == 'wb':\n def create():\n return fs.open_output_stream(path)\n elif mode == \"w\":\n def create():\n return io.TextIOWrapper(fs.open_output_stream(path), encoding=encoding)\n else:\n raise ValueError(f'Only mode=rb/bw/r/w are supported, not {mode}')\n return FileProxy(create(), path, create)\n\n\ndef dup(file):\n \"\"\"Duplicate a file like object, s3 or cached file supported\"\"\"\n if isinstance(file, (vaex.file.cache.CachedFile, FileProxy)):\n return file.dup()\n else:\n return normal_open(file.name, file.mode)\n\ndef glob(path, fs_options={}, fs=None):\n if fs:\n raise ValueError('globbing with custom fs not supported yet, please open an issue.')\n scheme, _ = split_scheme(path)\n if not scheme:\n return local_glob(path)\n module = _get_scheme_handler(path)\n return module.glob(path, fs_options)\n\n\ndef ext(path):\n path = stringyfy(path)\n path, options = split_options(path)\n return os.path.splitext(path)[1]\n", "path": "packages/vaex-core/vaex/file/__init__.py"}], "after_files": [{"content": "__author__ = 'breddels'\nimport pathlib\nimport logging\nfrom glob import glob as local_glob\nimport io\nimport os\nimport re\nimport sys\nfrom urllib.parse import parse_qs\nimport warnings\nimport pkg_resources\n\nimport pyarrow as pa\nimport pyarrow.fs\n\nimport vaex.file.cache\n\n\nnormal_open = open\nlogger = logging.getLogger(\"vaex.file\")\n\n\nclass FileProxy:\n '''Wraps a file object, giving it a name a dup() method\n\n The dup is needed since a file is stateful, and needs to be duplicated in threads\n '''\n def __init__(self, file, name, dup):\n self.file = file\n self.name = name\n self.dup = dup\n self.closed = False\n\n def __iter__(self):\n raise NotImplementedError('This is just for looking like a file object to Pandas')\n\n def write(self, *args):\n return self.file.write(*args)\n\n def read(self, *args):\n return self.file.read(*args)\n\n def seek(self, *args):\n return self.file.seek(*args)\n\n def readinto(self, *args):\n return 
self.file.readinto(*args)\n\n def tell(self):\n return self.file.tell()\n\n def close(self):\n self.closed = True\n return self.file.close()\n\n def __enter__(self, *args):\n return self\n\n def __exit__(self, *args):\n self.file.close()\n\n\ndef is_file_object(file):\n return hasattr(file, 'read') and hasattr(file, 'seek')\n\n\ndef file_and_path(file, mode='r', fs_options={}, fs=None):\n if is_file_object(file):\n return file, stringyfy(file)\n else:\n file = open(file, mode=mode, fs_options=fs_options, fs=fs)\n return file, stringyfy(file)\n\n\ndef is_path_like(path):\n try:\n stringyfy(path)\n return True\n except ValueError:\n return False\n\n\ndef stringyfy(path):\n \"\"\"Get string from path like object of file like object\n\n >>> import sys, pytest\n >>> if sys.platform.startswith('win'):\n ... pytest.skip('this doctest does not work on Windows')\n ...\n >>> stringyfy('/tmp/test')\n '/tmp/test'\n >>> from pathlib import Path\n >>> stringyfy(Path('/tmp/test'))\n '/tmp/test'\n \"\"\"\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')\n\n\ndef split_scheme(path):\n path = stringyfy(path)\n if '://' in path:\n scheme, path = path[:path.index('://')], path[path.index('://')+3:]\n else:\n scheme = None\n return scheme, path\n\n\ndef memory_mappable(path):\n path = stringyfy(path)\n scheme, _ = split_scheme(path)\n return scheme is None\n\n\ndef split_options(path, fs_options={}):\n if isinstance(path, list):\n paths = []\n previous_options = None\n for path in path:\n path, options = split_options(path, fs_options)\n if previous_options is not None:\n if previous_options != options:\n raise ValueError(f'Inconsistent set of fs_options given: {previous_options} {options}')\n else:\n previous_options = options\n paths.append(path)\n return paths, previous_options\n path = stringyfy(path)\n match = re.match(r'(.*?)\\?((&?[^=&?]+=[^=&?]+)+)', path)\n if match:\n naked_path, query = match.groups()[:2]\n else:\n naked_path = path\n query = ''\n options = fs_options.copy()\n options.update({key: values[0] for key, values in parse_qs(query).items()})\n return naked_path, options\n\n\ndef split_ext(path, fs_options={}):\n path, fs_options = split_options(path, fs_options=fs_options)\n base, ext = os.path.splitext(path)\n return base, ext, fs_options\n\n\ndef exists(path, fs_options={}, fs=None):\n \"\"\"Checks if file exists.\n\n >>> vaex.file.exists('/you/do/not')\n False\n\n >>> vaex.file.exists('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True})\n True\n \"\"\"\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n return os.path.exists(path)\n else:\n return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound\n\n\ndef _get_scheme_handler(path):\n scheme, _ = split_scheme(path)\n for entry in pkg_resources.iter_entry_points(group='vaex.file.scheme'):\n if entry.name == scheme:\n return entry.load()\n raise ValueError(f'Do not know how to open {path}, no handler for {scheme} is known')\n\n\ndef remove(path):\n scheme, path = split_scheme(path)\n if scheme:\n raise ValueError('Cannot delete non-local files yet')\n os.remove(path)\n\n\ndef parse(path, fs_options={}, fs=None, for_arrow=False):\n if fs is not None:\n if fs_options:\n warnings.warn(f'Passed fs_options while fs was specified, {fs_options} are ignored')\n if 'fsspec' in sys.modules:\n import 
fsspec\n if isinstance(fs, fsspec.AbstractFileSystem):\n fs = pa.fs.FSSpecHandler(fs)\n if for_arrow:\n fs = pyarrow.fs.PyFileSystem(fs)\n return fs, path\n if isinstance(path, (list, tuple)):\n scheme, _ = split_scheme(path[0])\n else:\n scheme, _ = split_scheme(path)\n if not scheme:\n return None, path\n if isinstance(path, (list, tuple)):\n module = _get_scheme_handler(path[0])\n return module.parse(path[0], fs_options, for_arrow=for_arrow)[0], path\n else:\n module = _get_scheme_handler(path)\n return module.parse(path, fs_options, for_arrow=for_arrow)\n\n\ndef create_dir(path, fs_options, fs=None):\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n fs = pa.fs.LocalFileSystem()\n fs.create_dir(path, recursive=True)\n\n\ndef fingerprint(path, fs_options={}, fs=None):\n \"\"\"Deterministic fingerprint for a file, useful in combination with dask or detecting file changes.\n\n Based on mtime (modification time), file size, and the path. May lead to\n false negative if the path changes, but not the content.\n\n >>> fingerprint('/data/taxi.parquet') # doctest: +SKIP\n '0171ec50cb2cf71b8e4f813212063a19'\n\n >>> fingerprint('s3://vaex/taxi/nyc_taxi_2015_mini.parquet', fs_options={'anon': True}) # doctest: +SKIP\n '7c962e2d8c21b6a3681afb682d3bf91b'\n \"\"\"\n fs, path = parse(path, fs_options, fs=fs)\n path = stringyfy(path)\n if fs is None:\n mtime = os.path.getmtime(path)\n size = os.path.getsize(path)\n else:\n info = fs.get_file_info([path])[0]\n mtime = info.mtime_ns\n size = info.size\n import vaex.cache\n return vaex.cache.fingerprint(('file', (path, mtime, size)))\n\n\ndef open(path, mode='rb', fs_options={}, fs=None, for_arrow=False, mmap=False, encoding=\"utf8\"):\n if is_file_object(path):\n return path\n fs, path = parse(path, fs_options=fs_options, fs=fs, for_arrow=for_arrow)\n if fs is None:\n path = stringyfy(path)\n if for_arrow:\n if fs_options:\n raise ValueError(f'fs_options not supported for local files. You passed: {repr(fs_options)}.')\n if mmap:\n return pa.memory_map(path, mode)\n else:\n return pa.OSFile(path, mode)\n else:\n if 'b' not in mode:\n return normal_open(path, mode, encoding=encoding)\n else:\n return normal_open(path, mode)\n if mode == 'rb':\n def create():\n return fs.open_input_file(path)\n elif mode == \"r\":\n def create():\n return io.TextIOWrapper(fs.open_input_file(path), encoding=encoding)\n elif mode == 'wb':\n def create():\n return fs.open_output_stream(path)\n elif mode == \"w\":\n def create():\n return io.TextIOWrapper(fs.open_output_stream(path), encoding=encoding)\n else:\n raise ValueError(f'Only mode=rb/bw/r/w are supported, not {mode}')\n return FileProxy(create(), path, create)\n\n\ndef dup(file):\n \"\"\"Duplicate a file like object, s3 or cached file supported\"\"\"\n if isinstance(file, (vaex.file.cache.CachedFile, FileProxy)):\n return file.dup()\n else:\n return normal_open(file.name, file.mode)\n\ndef glob(path, fs_options={}, fs=None):\n if fs:\n raise ValueError('globbing with custom fs not supported yet, please open an issue.')\n scheme, _ = split_scheme(path)\n if not scheme:\n return local_glob(path)\n module = _get_scheme_handler(path)\n return module.glob(path, fs_options)\n\n\ndef ext(path):\n path = stringyfy(path)\n path, options = split_options(path)\n return os.path.splitext(path)[1]\n", "path": "packages/vaex-core/vaex/file/__init__.py"}]} | 3,299 | 151 |
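Editor's note on the vaex record above: the golden diff wraps a user-supplied fsspec filesystem first in `pa.fs.FSSpecHandler` and then, when `for_arrow` is true, in `pyarrow.fs.PyFileSystem`, so that Arrow readers receive a native `pyarrow.fs.FileSystem` object. The sketch below shows the same wrapping pattern in isolation; the adlfs protocol, account name, and container path are illustrative assumptions added here for the ADLS Gen 2 use case from the issue, not values taken from the record.

```python
# Minimal sketch (editor's illustration, not part of the dataset record):
# adapt an fsspec filesystem so pyarrow readers accept it, mirroring
# pyarrow.fs.PyFileSystem(pyarrow.fs.FSSpecHandler(fs)) in the patch above.
import fsspec
import pyarrow.fs
import pyarrow.parquet as pq

# Any fsspec-compatible filesystem can be used; "abfs" is the protocol adlfs
# registers for ADLS Gen 2 (assumes adlfs is installed and configured).
fs = fsspec.filesystem("abfs", account_name="myaccount")  # hypothetical account

# Wrap the fsspec object so Arrow sees a native pyarrow.fs.FileSystem.
arrow_fs = pyarrow.fs.PyFileSystem(pyarrow.fs.FSSpecHandler(fs))

# Hypothetical container/path, included only to show the call shape.
table = pq.read_table("mycontainer/data/example.parquet", filesystem=arrow_fs)
```

In the patched `parse()` above, the extra `PyFileSystem` wrapping is applied only when `for_arrow` is true, so callers that expect the plain fsspec handler are unaffected.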
gh_patches_debug_3129 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3523 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Demo is broken for 0.1.6
## Description
<!-- A clear and concise description of what the bug is. -->
The demo can't be deployed with the current `master` branch (0.1.6)
The reasons are:
- We didn't add the fix to the demo settings in #3499 (just to prod), and
- We have an extra dependency in the demo setup which we need to build in but the current `Dockerfile` doesn't support that.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
We should be able to demo version 0.1.6 on our demo site.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demo/settings.py`
Content:
```
1 from config.settings.common_settings import * # noqa
2 from decouple import config as decouple_config
3
4 INSTALLED_APPS += [ # noqa
5 "demo",
6 "health_check",
7 ]
8
9 MIDDLEWARE += [ # noqa
10 "demo.middleware.LiveDemoModeMiddleware",
11 ]
12
13 MATHESAR_MODE = 'PRODUCTION'
14 MATHESAR_LIVE_DEMO = True
15 MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)
16 MATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)
17
18 MATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'
19 MATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(
20 'MATHESAR_DEMO_ARXIV_LOG_PATH',
21 default='/var/lib/mathesar/demo/arxiv_db_schema_log'
22 )
23 BASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa
24 ROOT_URLCONF = "demo.urls"
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demo/settings.py b/demo/settings.py
--- a/demo/settings.py
+++ b/demo/settings.py
@@ -10,6 +10,9 @@
"demo.middleware.LiveDemoModeMiddleware",
]
+
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+
MATHESAR_MODE = 'PRODUCTION'
MATHESAR_LIVE_DEMO = True
MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)
| {"golden_diff": "diff --git a/demo/settings.py b/demo/settings.py\n--- a/demo/settings.py\n+++ b/demo/settings.py\n@@ -10,6 +10,9 @@\n \"demo.middleware.LiveDemoModeMiddleware\",\n ]\n \n+\n+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n+\n MATHESAR_MODE = 'PRODUCTION'\n MATHESAR_LIVE_DEMO = True\n MATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\n", "issue": "Demo is broken for 0.1.6\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nThe demo can't be deployed with the current `master` branch (0.1.6)\r\n\r\nThe reasons are:\r\n\r\n- We didn't add the fix to the demo settings in #3499 (just to prod), and\r\n- We have an extra dependency in the demo setup which we need to build in but the current `Dockerfile` doesn't support that.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nWe should be able to demo version 0.1.6 on our demo site.\n", "before_files": [{"content": "from config.settings.common_settings import * # noqa\nfrom decouple import config as decouple_config\n\nINSTALLED_APPS += [ # noqa\n \"demo\",\n \"health_check\",\n]\n\nMIDDLEWARE += [ # noqa\n \"demo.middleware.LiveDemoModeMiddleware\",\n]\n\nMATHESAR_MODE = 'PRODUCTION'\nMATHESAR_LIVE_DEMO = True\nMATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\nMATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)\n\nMATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'\nMATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(\n 'MATHESAR_DEMO_ARXIV_LOG_PATH',\n default='/var/lib/mathesar/demo/arxiv_db_schema_log'\n)\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa\nROOT_URLCONF = \"demo.urls\"\n", "path": "demo/settings.py"}], "after_files": [{"content": "from config.settings.common_settings import * # noqa\nfrom decouple import config as decouple_config\n\nINSTALLED_APPS += [ # noqa\n \"demo\",\n \"health_check\",\n]\n\nMIDDLEWARE += [ # noqa\n \"demo.middleware.LiveDemoModeMiddleware\",\n]\n\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nMATHESAR_MODE = 'PRODUCTION'\nMATHESAR_LIVE_DEMO = True\nMATHESAR_LIVE_DEMO_USERNAME = decouple_config('MATHESAR_LIVE_DEMO_USERNAME', default=None)\nMATHESAR_LIVE_DEMO_PASSWORD = decouple_config('MATHESAR_LIVE_DEMO_PASSWORD', default=None)\n\nMATHESAR_DEMO_TEMPLATE = 'mathesar_demo_template'\nMATHESAR_DEMO_ARXIV_LOG_PATH = decouple_config(\n 'MATHESAR_DEMO_ARXIV_LOG_PATH',\n default='/var/lib/mathesar/demo/arxiv_db_schema_log'\n)\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES += ['demo/analytics.html'] # noqa\nROOT_URLCONF = \"demo.urls\"\n", "path": "demo/settings.py"}]} | 661 | 114 |
gh_patches_debug_18647 | rasdani/github-patches | git_diff | internetarchive__openlibrary-4075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "noindex" meta tag to people pages less than 30 days old
### Describe the problem that you'd like solved
<!-- A clear and concise description of what you want to happen. -->
Thousands of spam pages have been added to OL over time. In many instances, people and list pages are where the spam shows up. Spam can harm user trust in OL, affect OL's ranking in search engines, and potentially point to malicious sites and thus harm our users.
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
In this week's community call, the decision was made to add the ["noindex" meta tag](https://en.wikipedia.org/wiki/Noindex) to people (and I think list) pages belonging to accounts less than 30 days old. This will result in those pages not being indexed by search engines that honor that meta tag.
<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@mekarpeles @cdrini
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/core/helpers.py`
Content:
```
1 """Generic helper functions to use in the templates and the webapp.
2 """
3 import web
4 from datetime import datetime
5 import simplejson
6 import re
7
8 from six.moves.urllib.parse import urlsplit
9
10 import babel
11 import babel.core
12 import babel.dates
13 import babel.numbers
14
15 try:
16 import genshi
17 import genshi.filters
18 except ImportError:
19 genshi = None
20
21 try:
22 from bs4 import BeautifulSoup
23 except ImportError:
24 BeautifulSoup = None
25
26 import six
27
28 from infogami import config
29
30 # handy utility to parse ISO date strings
31 from infogami.infobase.utils import parse_datetime
32 from infogami.utils.view import safeint
33
34 # TODO: i18n should be moved to core or infogami
35 from openlibrary.i18n import gettext as _ # noqa: F401
36
37 __all__ = [
38 "sanitize",
39 "json_encode",
40 "safesort",
41 "datestr", "format_date",
42 "sprintf", "cond", "commify", "truncate", "datetimestr_utc",
43 "urlsafe", "texsafe",
44 "percentage", "affiliate_id", "bookreader_host",
45 "private_collections", "private_collection_in",
46
47 # functions imported from elsewhere
48 "parse_datetime", "safeint"
49 ]
50 __docformat__ = "restructuredtext en"
51
52 def sanitize(html, encoding='utf8'):
53 """Removes unsafe tags and attributes from html and adds
54 ``rel="nofollow"`` attribute to all external links.
55 Using encoding=None if passing unicode strings e.g. for Python 3.
56 encoding="utf8" matches default format for earlier versions of Genshi
57 https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version
58 """
59
60 # Can't sanitize unless genshi module is available
61 if genshi is None:
62 return html
63
64 def get_nofollow(name, event):
65 attrs = event[1][1]
66 href = attrs.get('href', '')
67
68 if href:
69 # add rel=nofollow to all absolute links
70 _, host, _, _, _ = urlsplit(href)
71 if host:
72 return 'nofollow'
73
74 try:
75 html = genshi.HTML(html, encoding=encoding)
76
77 # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:
78 # don't catch Unicode errors so we can tell if we're getting bytes
79 except genshi.ParseError:
80 if BeautifulSoup:
81 # Bad html. Tidy it up using BeautifulSoup
82 html = str(BeautifulSoup(html, "lxml"))
83 try:
84 html = genshi.HTML(html)
85 except Exception:
86 # Failed to sanitize.
87 # We can't do any better than returning the original HTML, without sanitizing.
88 return html
89 else:
90 raise
91
92 stream = html \
93 | genshi.filters.HTMLSanitizer() \
94 | genshi.filters.Transformer("//a").attr("rel", get_nofollow)
95 return stream.render()
96
97
98 def json_encode(d, **kw):
99 """Same as simplejson.dumps.
100 """
101 return simplejson.dumps(d, **kw)
102
103
104 def safesort(iterable, key=None, reverse=False):
105 """Sorts heterogeneous of objects without raising errors.
106
107 Sorting heterogeneous objects sometimes causes error. For example,
108 datetime and Nones don't go well together. This function takes special
109 care to make that work.
110 """
111 key = key or (lambda x: x)
112 def safekey(x):
113 k = key(x)
114 return (k.__class__.__name__, k)
115 return sorted(iterable, key=safekey, reverse=reverse)
116
117 def datestr(then, now=None, lang=None, relative=True):
118 """Internationalized version of web.datestr."""
119 lang = lang or web.ctx.get('lang') or "en"
120 if relative:
121 if now is None:
122 now = datetime.now()
123 delta = then - now
124 if abs(delta.days) < 4: # Threshold from web.py
125 return babel.dates.format_timedelta(delta,
126 add_direction=True,
127 locale=_get_babel_locale(lang))
128 return format_date(then, lang=lang)
129
130
131 def datetimestr_utc(then):
132 return then.strftime("%Y-%m-%dT%H:%M:%SZ")
133
134 def format_date(date, lang=None):
135 lang = lang or web.ctx.get('lang') or "en"
136 locale = _get_babel_locale(lang)
137 return babel.dates.format_date(date, format="long", locale=locale)
138
139 def _get_babel_locale(lang):
140 try:
141 return babel.Locale(lang)
142 except babel.core.UnknownLocaleError:
143 return babel.Locale("en")
144
145
146 def sprintf(s, *a, **kw):
147 """Handy utility for string replacements.
148
149 >>> sprintf('hello %s', 'python')
150 'hello python'
151 >>> sprintf('hello %(name)s', name='python')
152 'hello python'
153 """
154 args = kw or a
155 if args:
156 return s % args
157 else:
158 return s
159
160
161 def cond(pred, true_value, false_value=""):
162 """Lisp style cond function.
163
164 Hanly to use instead of if-else expression.
165 """
166 if pred:
167 return true_value
168 else:
169 return false_value
170
171
172 def commify(number, lang=None):
173 """localized version of web.commify"""
174 try:
175 lang = lang or web.ctx.get("lang") or "en"
176 return babel.numbers.format_number(int(number), lang)
177 except:
178 return six.text_type(number)
179
180
181 def truncate(text, limit):
182 """Truncate text and add ellipses if it longer than specified limit."""
183 if not text:
184 return ''
185 if len(text) <= limit:
186 return text
187 return text[:limit] + "..."
188
189
190 def urlsafe(path):
191 """Replaces the unsafe chars from path with underscores.
192 """
193 return _get_safepath_re().sub('_', path).strip('_')[:100]
194
195 @web.memoize
196 def _get_safepath_re():
197 """Make regular expression that matches all unsafe chars."""
198 # unsafe chars according to RFC 2396
199 reserved = ";/?:@&=+$,"
200 delims = '<>#%"'
201 unwise = "{}|\\^[]`"
202 space = ' \n\r'
203
204 unsafe = reserved + delims + unwise + space
205 pattern = '[%s]+' % "".join(re.escape(c) for c in unsafe)
206 return re.compile(pattern)
207
208
209 def get_coverstore_url():
210 """Returns the base url of coverstore by looking at the config."""
211 return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')
212
213
214 _texsafe_map = {
215 '"': r'\textquotedbl{}',
216 '#': r'\#',
217 '$': r'\$',
218 '%': r'\%',
219 '&': r'\&',
220 '<': r'\textless{}',
221 '>': r'\textgreater{}',
222 '\\': r'\textbackslash{}',
223 '^': r'\^{}',
224 '_': r'\_{}',
225 '{': r'\{',
226 '}': r'\}',
227 '|': r'\textbar{}',
228 '~': r'\~{}',
229 }
230
231 _texsafe_re = None
232
233 def texsafe(text):
234 """Escapes the special characters in the given text for using it in tex type setting.
235
236 Tex (or Latex) uses some characters in the ascii character range for
237 special notations. These characters must be escaped when occur in the
238 regular text. This function escapes those special characters.
239
240 The list of special characters and the latex command to typeset them can
241 be found in `The Comprehensive LaTeX Symbol List`_.
242
243 .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf
244 """
245 global _texsafe_re
246 if _texsafe_re is None:
247 pattern = "[%s]" % re.escape("".join(list(_texsafe_map)))
248 _texsafe_re = re.compile(pattern)
249
250 return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)
251
252 def percentage(value, total):
253 """Computes percentage.
254
255 >>> percentage(1, 10)
256 10.0
257 >>> percentage(0, 0)
258 0.0
259 """
260 return (value * 100.0) / total if total else 0.0
261
262 def uniq(values, key=None):
263 """Returns the unique entries from the given values in the original order.
264
265 The value of the optional `key` parameter should be a function that takes
266 a single argument and returns a key to test the uniqueness.
267 """
268 key = key or (lambda x: x)
269 s = set()
270 result = []
271 for v in values:
272 k = key(v)
273 if k not in s:
274 s.add(k)
275 result.append(v)
276 return result
277
278 def affiliate_id(affiliate):
279 return config.get('affiliate_ids', {}).get(affiliate, '')
280
281 def bookreader_host():
282 return config.get('bookreader_host', '')
283
284 def private_collections():
285 """Collections which are lendable but should not be linked from OL
286 TODO: Remove when we can handle institutional books"""
287 return ['georgetown-university-law-library-rr']
288
289 def private_collection_in(collections):
290 return any(x in private_collections() for x in collections)
291
292 def _get_helpers():
293 _globals = globals()
294 return web.storage((k, _globals[k]) for k in __all__)
295
296
297 ## This must be at the end of this module
298 helpers = _get_helpers()
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py
--- a/openlibrary/core/helpers.py
+++ b/openlibrary/core/helpers.py
@@ -35,7 +35,7 @@
"sanitize",
"json_encode",
"safesort",
- "datestr", "format_date",
+ "days_since", "datestr", "format_date",
"sprintf", "cond", "commify", "truncate", "datetimestr_utc",
"urlsafe", "texsafe",
"percentage", "affiliate_id", "bookreader_host",
@@ -111,6 +111,12 @@
return (k.__class__.__name__, k)
return sorted(iterable, key=safekey, reverse=reverse)
+
+def days_since(then, now=None):
+ delta = then - (now or datetime.now())
+ return abs(delta.days)
+
+
def datestr(then, now=None, lang=None, relative=True):
"""Internationalized version of web.datestr."""
lang = lang or web.ctx.get('lang') or "en"
| {"golden_diff": "diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py\n--- a/openlibrary/core/helpers.py\n+++ b/openlibrary/core/helpers.py\n@@ -35,7 +35,7 @@\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n- \"datestr\", \"format_date\",\n+ \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n@@ -111,6 +111,12 @@\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n \n+\n+def days_since(then, now=None):\n+ delta = then - (now or datetime.now())\n+ return abs(delta.days)\n+\n+\n def datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n", "issue": "Add \"noindex\" meta tag to people pages less than 30 days old\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\nThousands of spam pages have been added to OL over time. In many instances, people and list pages are where the spam shows up. Spam can harm user trust in OL, affect OL's ranking in search engines, and potentially point to malicious sites and thus harm our users.\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\nIn this week's community call, the decision was made to add the [\"noindex\" meta tag](https://en.wikipedia.org/wiki/Noindex) to people (and I think list) pages belonging to accounts less than 30 days old. This will result in those pages not being indexed by search engines that honor that meta tag.\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@mekarpeles @cdrini \r\n\r\n\n", "before_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport simplejson\nimport re\n\nfrom six.moves.urllib.parse import urlsplit\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nimport six\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. 
for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as simplejson.dumps.\n \"\"\"\n return simplejson.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from 
path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}], "after_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport simplejson\nimport re\n\nfrom six.moves.urllib.parse import urlsplit\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = 
None\n\nimport six\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as simplejson.dumps.\n \"\"\"\n return simplejson.dumps(d, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. 
This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. 
This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}]} | 3,437 | 241 |
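Editor's note on the Open Library record above: the patch introduces a `days_since(then, now=None)` helper (and exports it) so templates can tell how old an account is; the issue asks for a "noindex" robots meta tag on people/list pages owned by accounts younger than 30 days. The helper below is copied from the diff; the account-age value and the way the meta tag is emitted are hypothetical, since the template change itself is not part of this record.

```python
# Minimal sketch (editor's illustration, not part of the dataset record):
# using the days_since() helper added by the patch to gate a noindex tag.
from datetime import datetime


def days_since(then, now=None):
    # Same implementation as in the golden diff above.
    delta = then - (now or datetime.now())
    return abs(delta.days)


account_created = datetime(2020, 11, 1)  # hypothetical; would come from the account record

# Hypothetical template-side decision: hide pages of young accounts from crawlers.
if days_since(account_created) < 30:
    robots_meta = '<meta name="robots" content="noindex">'
else:
    robots_meta = ''
```

Search engines that honor the tag will skip indexing those pages, which is the spam-mitigation behaviour the issue describes.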
gh_patches_debug_28726 | rasdani/github-patches | git_diff | networkx__networkx-7388 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
3.2.1: pytest fails in networkx/drawing/tests/test_pylab.py with `module 'matplotlib' has no attribute 'use'`
matplotlib 3.8.3 and pytest fails on scanning units with
```console
+ PYTHONPATH=/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib64/python3.9/site-packages:/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib/python3.9/site-packages
+ /usr/bin/pytest -ra -m 'not network'
============================= test session starts ==============================
platform linux -- Python 3.9.18, pytest-8.1.1, pluggy-1.4.0
rootdir: /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1
configfile: pyproject.toml
collected 4831 items / 1 error / 30 skipped
==================================== ERRORS ====================================
____________ ERROR collecting networkx/drawing/tests/test_pylab.py _____________
networkx/drawing/tests/test_pylab.py:10: in <module>
mpl.use("PS")
E AttributeError: module 'matplotlib' has no attribute 'use'
=============================== warnings summary ===============================
networkx/utils/backends.py:135
/home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:135: RuntimeWarning: networkx backend defined more than once: nx-loopback
backends.update(_get_backends("networkx.backends"))
networkx/utils/backends.py:576
/home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:576: DeprecationWarning:
random_tree is deprecated and will be removed in NX v3.4
Use random_labeled_tree instead.
return self.orig_func(*args, **kwargs)
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/conftest.py`
Content:
```
1 """
2 Testing
3 =======
4
5 General guidelines for writing good tests:
6
7 - doctests always assume ``import networkx as nx`` so don't add that
8 - prefer pytest fixtures over classes with setup methods.
9 - use the ``@pytest.mark.parametrize`` decorator
10 - use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.
11 and add the module to the relevant entries below.
12
13 """
14 import os
15 import sys
16 import warnings
17 from importlib.metadata import entry_points
18
19 import pytest
20
21 import networkx
22
23
24 def pytest_addoption(parser):
25 parser.addoption(
26 "--runslow", action="store_true", default=False, help="run slow tests"
27 )
28 parser.addoption(
29 "--backend",
30 action="store",
31 default=None,
32 help="Run tests with a backend by auto-converting nx graphs to backend graphs",
33 )
34 parser.addoption(
35 "--fallback-to-nx",
36 action="store_true",
37 default=False,
38 help="Run nx function if a backend doesn't implement a dispatchable function"
39 " (use with --backend)",
40 )
41
42
43 def pytest_configure(config):
44 config.addinivalue_line("markers", "slow: mark test as slow to run")
45 backend = config.getoption("--backend")
46 if backend is None:
47 backend = os.environ.get("NETWORKX_TEST_BACKEND")
48 # nx-loopback backend is only available when testing
49 backends = entry_points(name="nx-loopback", group="networkx.backends")
50 if backends:
51 networkx.utils.backends.backends["nx-loopback"] = next(iter(backends))
52 else:
53 warnings.warn(
54 "\n\n WARNING: Mixed NetworkX configuration! \n\n"
55 " This environment has mixed configuration for networkx.\n"
56 " The test object nx-loopback is not configured correctly.\n"
57 " You should not be seeing this message.\n"
58 " Try `pip install -e .`, or change your PYTHONPATH\n"
59 " Make sure python finds the networkx repo you are testing\n\n"
60 )
61 if backend:
62 networkx.config["backend_priority"] = [backend]
63 fallback_to_nx = config.getoption("--fallback-to-nx")
64 if not fallback_to_nx:
65 fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX")
66 networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)
67
68
69 def pytest_collection_modifyitems(config, items):
70 # Setting this to True here allows tests to be set up before dispatching
71 # any function call to a backend.
72 networkx.utils.backends._dispatchable._is_testing = True
73 if backend_priority := networkx.config["backend_priority"]:
74 # Allow pluggable backends to add markers to tests (such as skip or xfail)
75 # when running in auto-conversion test mode
76 backend = networkx.utils.backends.backends[backend_priority[0]].load()
77 if hasattr(backend, "on_start_tests"):
78 getattr(backend, "on_start_tests")(items)
79
80 if config.getoption("--runslow"):
81 # --runslow given in cli: do not skip slow tests
82 return
83 skip_slow = pytest.mark.skip(reason="need --runslow option to run")
84 for item in items:
85 if "slow" in item.keywords:
86 item.add_marker(skip_slow)
87
88
89 # TODO: The warnings below need to be dealt with, but for now we silence them.
90 @pytest.fixture(autouse=True)
91 def set_warnings():
92 warnings.filterwarnings(
93 "ignore",
94 category=FutureWarning,
95 message="\n\nsingle_target_shortest_path_length",
96 )
97 warnings.filterwarnings(
98 "ignore",
99 category=FutureWarning,
100 message="\n\nshortest_path",
101 )
102 warnings.filterwarnings(
103 "ignore", category=DeprecationWarning, message="\nforest_str is deprecated"
104 )
105 warnings.filterwarnings(
106 "ignore", category=DeprecationWarning, message="\n\nrandom_tree"
107 )
108 warnings.filterwarnings(
109 "ignore", category=DeprecationWarning, message="Edmonds has been deprecated"
110 )
111 warnings.filterwarnings(
112 "ignore",
113 category=DeprecationWarning,
114 message="MultiDiGraph_EdgeKey has been deprecated",
115 )
116 warnings.filterwarnings(
117 "ignore", category=DeprecationWarning, message="\n\nThe `normalized`"
118 )
119 warnings.filterwarnings(
120 "ignore",
121 category=DeprecationWarning,
122 message="The function `join` is deprecated",
123 )
124 warnings.filterwarnings(
125 "ignore",
126 category=DeprecationWarning,
127 message="\n\nstrongly_connected_components_recursive",
128 )
129 warnings.filterwarnings(
130 "ignore", category=DeprecationWarning, message="\n\nall_triplets"
131 )
132 warnings.filterwarnings(
133 "ignore", category=DeprecationWarning, message="\n\nrandom_triad"
134 )
135 warnings.filterwarnings(
136 "ignore", category=DeprecationWarning, message="minimal_d_separator"
137 )
138 warnings.filterwarnings(
139 "ignore", category=DeprecationWarning, message="d_separated"
140 )
141 warnings.filterwarnings("ignore", category=DeprecationWarning, message="\n\nk_core")
142 warnings.filterwarnings(
143 "ignore", category=DeprecationWarning, message="\n\nk_shell"
144 )
145 warnings.filterwarnings(
146 "ignore", category=DeprecationWarning, message="\n\nk_crust"
147 )
148 warnings.filterwarnings(
149 "ignore", category=DeprecationWarning, message="\n\nk_corona"
150 )
151 warnings.filterwarnings(
152 "ignore", category=DeprecationWarning, message="\n\ntotal_spanning_tree_weight"
153 )
154 warnings.filterwarnings(
155 "ignore", category=DeprecationWarning, message=r"\n\nThe 'create=matrix'"
156 )
157
158
159 @pytest.fixture(autouse=True)
160 def add_nx(doctest_namespace):
161 doctest_namespace["nx"] = networkx
162
163
164 # What dependencies are installed?
165
166 try:
167 import numpy
168
169 has_numpy = True
170 except ImportError:
171 has_numpy = False
172
173 try:
174 import scipy
175
176 has_scipy = True
177 except ImportError:
178 has_scipy = False
179
180 try:
181 import matplotlib
182
183 has_matplotlib = True
184 except ImportError:
185 has_matplotlib = False
186
187 try:
188 import pandas
189
190 has_pandas = True
191 except ImportError:
192 has_pandas = False
193
194 try:
195 import pygraphviz
196
197 has_pygraphviz = True
198 except ImportError:
199 has_pygraphviz = False
200
201 try:
202 import pydot
203
204 has_pydot = True
205 except ImportError:
206 has_pydot = False
207
208 try:
209 import sympy
210
211 has_sympy = True
212 except ImportError:
213 has_sympy = False
214
215
216 # List of files that pytest should ignore
217
218 collect_ignore = []
219
220 needs_numpy = [
221 "algorithms/approximation/traveling_salesman.py",
222 "algorithms/centrality/current_flow_closeness.py",
223 "algorithms/node_classification.py",
224 "algorithms/non_randomness.py",
225 "algorithms/shortest_paths/dense.py",
226 "algorithms/tree/mst.py",
227 "generators/expanders.py",
228 "linalg/bethehessianmatrix.py",
229 "linalg/laplacianmatrix.py",
230 "utils/misc.py",
231 "algorithms/centrality/laplacian.py",
232 ]
233 needs_scipy = [
234 "algorithms/approximation/traveling_salesman.py",
235 "algorithms/assortativity/correlation.py",
236 "algorithms/assortativity/mixing.py",
237 "algorithms/assortativity/pairs.py",
238 "algorithms/bipartite/matrix.py",
239 "algorithms/bipartite/spectral.py",
240 "algorithms/centrality/current_flow_betweenness.py",
241 "algorithms/centrality/current_flow_betweenness_subset.py",
242 "algorithms/centrality/eigenvector.py",
243 "algorithms/centrality/katz.py",
244 "algorithms/centrality/second_order.py",
245 "algorithms/centrality/subgraph_alg.py",
246 "algorithms/communicability_alg.py",
247 "algorithms/link_analysis/hits_alg.py",
248 "algorithms/link_analysis/pagerank_alg.py",
249 "algorithms/node_classification.py",
250 "algorithms/similarity.py",
251 "convert_matrix.py",
252 "drawing/layout.py",
253 "generators/spectral_graph_forge.py",
254 "generators/expanders.py",
255 "linalg/algebraicconnectivity.py",
256 "linalg/attrmatrix.py",
257 "linalg/bethehessianmatrix.py",
258 "linalg/graphmatrix.py",
259 "linalg/modularitymatrix.py",
260 "linalg/spectrum.py",
261 "utils/rcm.py",
262 "algorithms/centrality/laplacian.py",
263 ]
264 needs_matplotlib = ["drawing/nx_pylab.py"]
265 needs_pandas = ["convert_matrix.py"]
266 needs_pygraphviz = ["drawing/nx_agraph.py"]
267 needs_pydot = ["drawing/nx_pydot.py"]
268 needs_sympy = ["algorithms/polynomials.py"]
269
270 if not has_numpy:
271 collect_ignore += needs_numpy
272 if not has_scipy:
273 collect_ignore += needs_scipy
274 if not has_matplotlib:
275 collect_ignore += needs_matplotlib
276 if not has_pandas:
277 collect_ignore += needs_pandas
278 if not has_pygraphviz:
279 collect_ignore += needs_pygraphviz
280 if not has_pydot:
281 collect_ignore += needs_pydot
282 if not has_sympy:
283 collect_ignore += needs_sympy
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/conftest.py b/networkx/conftest.py
--- a/networkx/conftest.py
+++ b/networkx/conftest.py
@@ -241,25 +241,31 @@
"algorithms/centrality/current_flow_betweenness_subset.py",
"algorithms/centrality/eigenvector.py",
"algorithms/centrality/katz.py",
+ "algorithms/centrality/laplacian.py",
"algorithms/centrality/second_order.py",
"algorithms/centrality/subgraph_alg.py",
"algorithms/communicability_alg.py",
+ "algorithms/community/divisive.py",
+ "algorithms/distance_measures.py",
"algorithms/link_analysis/hits_alg.py",
"algorithms/link_analysis/pagerank_alg.py",
"algorithms/node_classification.py",
"algorithms/similarity.py",
+ "algorithms/tree/mst.py",
+ "algorithms/walks.py",
"convert_matrix.py",
"drawing/layout.py",
+ "drawing/nx_pylab.py",
"generators/spectral_graph_forge.py",
"generators/expanders.py",
"linalg/algebraicconnectivity.py",
"linalg/attrmatrix.py",
"linalg/bethehessianmatrix.py",
"linalg/graphmatrix.py",
+ "linalg/laplacianmatrix.py",
"linalg/modularitymatrix.py",
"linalg/spectrum.py",
"utils/rcm.py",
- "algorithms/centrality/laplacian.py",
]
needs_matplotlib = ["drawing/nx_pylab.py"]
needs_pandas = ["convert_matrix.py"]
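The lists being extended here feed pytest's `collect_ignore` hook: any path placed in that module-level list in `conftest.py` is skipped at collection time, so moving the SciPy-dependent modules into `needs_scipy` keeps them (and their doctests) out of collection whenever the `import scipy` probe fails. A minimal sketch of the pattern, with a single illustrative path rather than the repository's full lists:

```python
# conftest.py -- sketch of the optional-dependency guard used above
try:
    import scipy  # noqa: F401
    has_scipy = True
except ImportError:
    has_scipy = False

collect_ignore = []                    # paths pytest will skip during collection
needs_scipy = ["linalg/spectrum.py"]   # one illustrative entry

if not has_scipy:
    collect_ignore += needs_scipy
```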
| {"golden_diff": "diff --git a/networkx/conftest.py b/networkx/conftest.py\n--- a/networkx/conftest.py\n+++ b/networkx/conftest.py\n@@ -241,25 +241,31 @@\n \"algorithms/centrality/current_flow_betweenness_subset.py\",\n \"algorithms/centrality/eigenvector.py\",\n \"algorithms/centrality/katz.py\",\n+ \"algorithms/centrality/laplacian.py\",\n \"algorithms/centrality/second_order.py\",\n \"algorithms/centrality/subgraph_alg.py\",\n \"algorithms/communicability_alg.py\",\n+ \"algorithms/community/divisive.py\",\n+ \"algorithms/distance_measures.py\",\n \"algorithms/link_analysis/hits_alg.py\",\n \"algorithms/link_analysis/pagerank_alg.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/similarity.py\",\n+ \"algorithms/tree/mst.py\",\n+ \"algorithms/walks.py\",\n \"convert_matrix.py\",\n \"drawing/layout.py\",\n+ \"drawing/nx_pylab.py\",\n \"generators/spectral_graph_forge.py\",\n \"generators/expanders.py\",\n \"linalg/algebraicconnectivity.py\",\n \"linalg/attrmatrix.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/graphmatrix.py\",\n+ \"linalg/laplacianmatrix.py\",\n \"linalg/modularitymatrix.py\",\n \"linalg/spectrum.py\",\n \"utils/rcm.py\",\n- \"algorithms/centrality/laplacian.py\",\n ]\n needs_matplotlib = [\"drawing/nx_pylab.py\"]\n needs_pandas = [\"convert_matrix.py\"]\n", "issue": "3.2.1: pytest fails in networkx/drawing/tests/test_pylab.py with `module 'matplotlib' has no attribute 'use'`\nmatplotlib 3.8.3 and pytest fails on scanning units with\r\n```console\r\n+ PYTHONPATH=/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib64/python3.9/site-packages:/home/tkloczko/rpmbuild/BUILDROOT/python-networkx-3.2.1-5.fc36.x86_64/usr/lib/python3.9/site-packages\r\n+ /usr/bin/pytest -ra -m 'not network'\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.9.18, pytest-8.1.1, pluggy-1.4.0\r\nrootdir: /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1\r\nconfigfile: pyproject.toml\r\ncollected 4831 items / 1 error / 30 skipped\r\n\r\n==================================== ERRORS ====================================\r\n____________ ERROR collecting networkx/drawing/tests/test_pylab.py _____________\r\nnetworkx/drawing/tests/test_pylab.py:10: in <module>\r\n mpl.use(\"PS\")\r\nE AttributeError: module 'matplotlib' has no attribute 'use'\r\n=============================== warnings summary ===============================\r\nnetworkx/utils/backends.py:135\r\n /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:135: RuntimeWarning: networkx backend defined more than once: nx-loopback\r\n backends.update(_get_backends(\"networkx.backends\"))\r\n\r\nnetworkx/utils/backends.py:576\r\n /home/tkloczko/rpmbuild/BUILD/networkx-networkx-3.2.1/networkx/utils/backends.py:576: DeprecationWarning:\r\n\r\n random_tree is deprecated and will be removed in NX v3.4\r\n Use random_labeled_tree instead.\r\n return self.orig_func(*args, **kwargs)\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n```\n", "before_files": [{"content": "\"\"\"\nTesting\n=======\n\nGeneral guidelines for writing good tests:\n\n- doctests always assume ``import networkx as nx`` so don't add that\n- prefer pytest fixtures over classes with setup methods.\n- use the ``@pytest.mark.parametrize`` decorator\n- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.\n and add the module to the relevant entries below.\n\n\"\"\"\nimport os\nimport sys\nimport 
warnings\nfrom importlib.metadata import entry_points\n\nimport pytest\n\nimport networkx\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n parser.addoption(\n \"--backend\",\n action=\"store\",\n default=None,\n help=\"Run tests with a backend by auto-converting nx graphs to backend graphs\",\n )\n parser.addoption(\n \"--fallback-to-nx\",\n action=\"store_true\",\n default=False,\n help=\"Run nx function if a backend doesn't implement a dispatchable function\"\n \" (use with --backend)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n backend = config.getoption(\"--backend\")\n if backend is None:\n backend = os.environ.get(\"NETWORKX_TEST_BACKEND\")\n # nx-loopback backend is only available when testing\n backends = entry_points(name=\"nx-loopback\", group=\"networkx.backends\")\n if backends:\n networkx.utils.backends.backends[\"nx-loopback\"] = next(iter(backends))\n else:\n warnings.warn(\n \"\\n\\n WARNING: Mixed NetworkX configuration! \\n\\n\"\n \" This environment has mixed configuration for networkx.\\n\"\n \" The test object nx-loopback is not configured correctly.\\n\"\n \" You should not be seeing this message.\\n\"\n \" Try `pip install -e .`, or change your PYTHONPATH\\n\"\n \" Make sure python finds the networkx repo you are testing\\n\\n\"\n )\n if backend:\n networkx.config[\"backend_priority\"] = [backend]\n fallback_to_nx = config.getoption(\"--fallback-to-nx\")\n if not fallback_to_nx:\n fallback_to_nx = os.environ.get(\"NETWORKX_FALLBACK_TO_NX\")\n networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)\n\n\ndef pytest_collection_modifyitems(config, items):\n # Setting this to True here allows tests to be set up before dispatching\n # any function call to a backend.\n networkx.utils.backends._dispatchable._is_testing = True\n if backend_priority := networkx.config[\"backend_priority\"]:\n # Allow pluggable backends to add markers to tests (such as skip or xfail)\n # when running in auto-conversion test mode\n backend = networkx.utils.backends.backends[backend_priority[0]].load()\n if hasattr(backend, \"on_start_tests\"):\n getattr(backend, \"on_start_tests\")(items)\n\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n\n\n# TODO: The warnings below need to be dealt with, but for now we silence them.\[email protected](autouse=True)\ndef set_warnings():\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nsingle_target_shortest_path_length\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nshortest_path\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\nforest_str is deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_tree\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"Edmonds has been deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"MultiDiGraph_EdgeKey has been deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nThe `normalized`\"\n )\n warnings.filterwarnings(\n \"ignore\",\n 
category=DeprecationWarning,\n message=\"The function `join` is deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"\\n\\nstrongly_connected_components_recursive\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nall_triplets\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_triad\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"minimal_d_separator\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"d_separated\"\n )\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, message=\"\\n\\nk_core\")\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_shell\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_crust\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_corona\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\ntotal_spanning_tree_weight\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=r\"\\n\\nThe 'create=matrix'\"\n )\n\n\[email protected](autouse=True)\ndef add_nx(doctest_namespace):\n doctest_namespace[\"nx\"] = networkx\n\n\n# What dependencies are installed?\n\ntry:\n import numpy\n\n has_numpy = True\nexcept ImportError:\n has_numpy = False\n\ntry:\n import scipy\n\n has_scipy = True\nexcept ImportError:\n has_scipy = False\n\ntry:\n import matplotlib\n\n has_matplotlib = True\nexcept ImportError:\n has_matplotlib = False\n\ntry:\n import pandas\n\n has_pandas = True\nexcept ImportError:\n has_pandas = False\n\ntry:\n import pygraphviz\n\n has_pygraphviz = True\nexcept ImportError:\n has_pygraphviz = False\n\ntry:\n import pydot\n\n has_pydot = True\nexcept ImportError:\n has_pydot = False\n\ntry:\n import sympy\n\n has_sympy = True\nexcept ImportError:\n has_sympy = False\n\n\n# List of files that pytest should ignore\n\ncollect_ignore = []\n\nneeds_numpy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/centrality/current_flow_closeness.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/non_randomness.py\",\n \"algorithms/shortest_paths/dense.py\",\n \"algorithms/tree/mst.py\",\n \"generators/expanders.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/laplacianmatrix.py\",\n \"utils/misc.py\",\n \"algorithms/centrality/laplacian.py\",\n]\nneeds_scipy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/assortativity/correlation.py\",\n \"algorithms/assortativity/mixing.py\",\n \"algorithms/assortativity/pairs.py\",\n \"algorithms/bipartite/matrix.py\",\n \"algorithms/bipartite/spectral.py\",\n \"algorithms/centrality/current_flow_betweenness.py\",\n \"algorithms/centrality/current_flow_betweenness_subset.py\",\n \"algorithms/centrality/eigenvector.py\",\n \"algorithms/centrality/katz.py\",\n \"algorithms/centrality/second_order.py\",\n \"algorithms/centrality/subgraph_alg.py\",\n \"algorithms/communicability_alg.py\",\n \"algorithms/link_analysis/hits_alg.py\",\n \"algorithms/link_analysis/pagerank_alg.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/similarity.py\",\n \"convert_matrix.py\",\n \"drawing/layout.py\",\n \"generators/spectral_graph_forge.py\",\n \"generators/expanders.py\",\n \"linalg/algebraicconnectivity.py\",\n \"linalg/attrmatrix.py\",\n \"linalg/bethehessianmatrix.py\",\n 
\"linalg/graphmatrix.py\",\n \"linalg/modularitymatrix.py\",\n \"linalg/spectrum.py\",\n \"utils/rcm.py\",\n \"algorithms/centrality/laplacian.py\",\n]\nneeds_matplotlib = [\"drawing/nx_pylab.py\"]\nneeds_pandas = [\"convert_matrix.py\"]\nneeds_pygraphviz = [\"drawing/nx_agraph.py\"]\nneeds_pydot = [\"drawing/nx_pydot.py\"]\nneeds_sympy = [\"algorithms/polynomials.py\"]\n\nif not has_numpy:\n collect_ignore += needs_numpy\nif not has_scipy:\n collect_ignore += needs_scipy\nif not has_matplotlib:\n collect_ignore += needs_matplotlib\nif not has_pandas:\n collect_ignore += needs_pandas\nif not has_pygraphviz:\n collect_ignore += needs_pygraphviz\nif not has_pydot:\n collect_ignore += needs_pydot\nif not has_sympy:\n collect_ignore += needs_sympy\n", "path": "networkx/conftest.py"}], "after_files": [{"content": "\"\"\"\nTesting\n=======\n\nGeneral guidelines for writing good tests:\n\n- doctests always assume ``import networkx as nx`` so don't add that\n- prefer pytest fixtures over classes with setup methods.\n- use the ``@pytest.mark.parametrize`` decorator\n- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.\n and add the module to the relevant entries below.\n\n\"\"\"\nimport os\nimport sys\nimport warnings\nfrom importlib.metadata import entry_points\n\nimport pytest\n\nimport networkx\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--runslow\", action=\"store_true\", default=False, help=\"run slow tests\"\n )\n parser.addoption(\n \"--backend\",\n action=\"store\",\n default=None,\n help=\"Run tests with a backend by auto-converting nx graphs to backend graphs\",\n )\n parser.addoption(\n \"--fallback-to-nx\",\n action=\"store_true\",\n default=False,\n help=\"Run nx function if a backend doesn't implement a dispatchable function\"\n \" (use with --backend)\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\", \"slow: mark test as slow to run\")\n backend = config.getoption(\"--backend\")\n if backend is None:\n backend = os.environ.get(\"NETWORKX_TEST_BACKEND\")\n # nx-loopback backend is only available when testing\n backends = entry_points(name=\"nx-loopback\", group=\"networkx.backends\")\n if backends:\n networkx.utils.backends.backends[\"nx-loopback\"] = next(iter(backends))\n else:\n warnings.warn(\n \"\\n\\n WARNING: Mixed NetworkX configuration! 
\\n\\n\"\n \" This environment has mixed configuration for networkx.\\n\"\n \" The test object nx-loopback is not configured correctly.\\n\"\n \" You should not be seeing this message.\\n\"\n \" Try `pip install -e .`, or change your PYTHONPATH\\n\"\n \" Make sure python finds the networkx repo you are testing\\n\\n\"\n )\n if backend:\n networkx.config[\"backend_priority\"] = [backend]\n fallback_to_nx = config.getoption(\"--fallback-to-nx\")\n if not fallback_to_nx:\n fallback_to_nx = os.environ.get(\"NETWORKX_FALLBACK_TO_NX\")\n networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)\n\n\ndef pytest_collection_modifyitems(config, items):\n # Setting this to True here allows tests to be set up before dispatching\n # any function call to a backend.\n networkx.utils.backends._dispatchable._is_testing = True\n if backend_priority := networkx.config[\"backend_priority\"]:\n # Allow pluggable backends to add markers to tests (such as skip or xfail)\n # when running in auto-conversion test mode\n backend = networkx.utils.backends.backends[backend_priority[0]].load()\n if hasattr(backend, \"on_start_tests\"):\n getattr(backend, \"on_start_tests\")(items)\n\n if config.getoption(\"--runslow\"):\n # --runslow given in cli: do not skip slow tests\n return\n skip_slow = pytest.mark.skip(reason=\"need --runslow option to run\")\n for item in items:\n if \"slow\" in item.keywords:\n item.add_marker(skip_slow)\n\n\n# TODO: The warnings below need to be dealt with, but for now we silence them.\[email protected](autouse=True)\ndef set_warnings():\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nsingle_target_shortest_path_length\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=FutureWarning,\n message=\"\\n\\nshortest_path\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\nforest_str is deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_tree\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"Edmonds has been deprecated\"\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"MultiDiGraph_EdgeKey has been deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nThe `normalized`\"\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"The function `join` is deprecated\",\n )\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"\\n\\nstrongly_connected_components_recursive\",\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nall_triplets\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nrandom_triad\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"minimal_d_separator\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"d_separated\"\n )\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, message=\"\\n\\nk_core\")\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_shell\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_crust\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=\"\\n\\nk_corona\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, 
message=\"\\n\\ntotal_spanning_tree_weight\"\n )\n warnings.filterwarnings(\n \"ignore\", category=DeprecationWarning, message=r\"\\n\\nThe 'create=matrix'\"\n )\n\n\[email protected](autouse=True)\ndef add_nx(doctest_namespace):\n doctest_namespace[\"nx\"] = networkx\n\n\n# What dependencies are installed?\n\ntry:\n import numpy\n\n has_numpy = True\nexcept ImportError:\n has_numpy = False\n\ntry:\n import scipy\n\n has_scipy = True\nexcept ImportError:\n has_scipy = False\n\ntry:\n import matplotlib\n\n has_matplotlib = True\nexcept ImportError:\n has_matplotlib = False\n\ntry:\n import pandas\n\n has_pandas = True\nexcept ImportError:\n has_pandas = False\n\ntry:\n import pygraphviz\n\n has_pygraphviz = True\nexcept ImportError:\n has_pygraphviz = False\n\ntry:\n import pydot\n\n has_pydot = True\nexcept ImportError:\n has_pydot = False\n\ntry:\n import sympy\n\n has_sympy = True\nexcept ImportError:\n has_sympy = False\n\n\n# List of files that pytest should ignore\n\ncollect_ignore = []\n\nneeds_numpy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/centrality/current_flow_closeness.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/non_randomness.py\",\n \"algorithms/shortest_paths/dense.py\",\n \"algorithms/tree/mst.py\",\n \"generators/expanders.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/laplacianmatrix.py\",\n \"utils/misc.py\",\n \"algorithms/centrality/laplacian.py\",\n]\nneeds_scipy = [\n \"algorithms/approximation/traveling_salesman.py\",\n \"algorithms/assortativity/correlation.py\",\n \"algorithms/assortativity/mixing.py\",\n \"algorithms/assortativity/pairs.py\",\n \"algorithms/bipartite/matrix.py\",\n \"algorithms/bipartite/spectral.py\",\n \"algorithms/centrality/current_flow_betweenness.py\",\n \"algorithms/centrality/current_flow_betweenness_subset.py\",\n \"algorithms/centrality/eigenvector.py\",\n \"algorithms/centrality/katz.py\",\n \"algorithms/centrality/laplacian.py\",\n \"algorithms/centrality/second_order.py\",\n \"algorithms/centrality/subgraph_alg.py\",\n \"algorithms/communicability_alg.py\",\n \"algorithms/community/divisive.py\",\n \"algorithms/distance_measures.py\",\n \"algorithms/link_analysis/hits_alg.py\",\n \"algorithms/link_analysis/pagerank_alg.py\",\n \"algorithms/node_classification.py\",\n \"algorithms/similarity.py\",\n \"algorithms/tree/mst.py\",\n \"algorithms/walks.py\",\n \"convert_matrix.py\",\n \"drawing/layout.py\",\n \"drawing/nx_pylab.py\",\n \"generators/spectral_graph_forge.py\",\n \"generators/expanders.py\",\n \"linalg/algebraicconnectivity.py\",\n \"linalg/attrmatrix.py\",\n \"linalg/bethehessianmatrix.py\",\n \"linalg/graphmatrix.py\",\n \"linalg/laplacianmatrix.py\",\n \"linalg/modularitymatrix.py\",\n \"linalg/spectrum.py\",\n \"utils/rcm.py\",\n]\nneeds_matplotlib = [\"drawing/nx_pylab.py\"]\nneeds_pandas = [\"convert_matrix.py\"]\nneeds_pygraphviz = [\"drawing/nx_agraph.py\"]\nneeds_pydot = [\"drawing/nx_pydot.py\"]\nneeds_sympy = [\"algorithms/polynomials.py\"]\n\nif not has_numpy:\n collect_ignore += needs_numpy\nif not has_scipy:\n collect_ignore += needs_scipy\nif not has_matplotlib:\n collect_ignore += needs_matplotlib\nif not has_pandas:\n collect_ignore += needs_pandas\nif not has_pygraphviz:\n collect_ignore += needs_pygraphviz\nif not has_pydot:\n collect_ignore += needs_pydot\nif not has_sympy:\n collect_ignore += needs_sympy\n", "path": "networkx/conftest.py"}]} | 3,607 | 382 |
gh_patches_debug_1885 | rasdani/github-patches | git_diff | ansible__ansible-17707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lookup properties
<!--- Verify first that your issue/request is not already reported in GitHub -->
##### ISSUE TYPE
<!--- Pick one below and delete the rest: -->
- Bug Report
##### ANSIBLE VERSION
<!--- Paste verbatim output from “ansible --version” between quotes below -->
```
ansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400)
lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400)
lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400)
config file = /etc/ansible/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
##### OS / ENVIRONMENT
"N/A"
##### SUMMARY
[commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d)
this commit breaks `lookup` for `type=properties`
##### STEPS TO REPRODUCE
just try to read some properties through `lookup`
```
- name: lookup
vars:
property_value: "{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}"
debug: msg="{{ property_value }}"
```
##### EXPECTED RESULTS
read the correct value from the property file
##### ACTUAL RESULTS
```
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 512, in _read
raise MissingSectionHeaderError(fpname, lineno, line)
MissingSectionHeaderError: File contains no section headers.
file: /config.properties, line: 3
'environment=dev\n'
fatal: [localhost]: FAILED! => {"failed": true, "msg": "Unexpected failure during module execution.", "stdout": ""}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @test.retry
PLAY RECAP *********************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=1
```
By the way, it would be great to implement 'autofill' properties
```
host=127.0.0.1
api.host=${host}
```
cc @jctanner
--- END ISSUE ---
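The `MissingSectionHeaderError` in the traceback is ordinary `ConfigParser` behaviour when it is handed a sectionless Java-style properties file, which suggests the `type=properties` request never reaches the parser selection in the lookup plugin shown below. A minimal standard-library reproduction of that underlying error (independent of Ansible; the file contents are assumed from the traceback):

```python
from configparser import ConfigParser, MissingSectionHeaderError
from io import StringIO

cp = ConfigParser()
try:
    # a properties file has key=value pairs but no [section] header
    cp.read_file(StringIO("environment=dev\n"))
except MissingSectionHeaderError as exc:
    print(exc)  # "File contains no section headers." -- the same failure as above
```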
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/plugins/lookup/ini.py`
Content:
```
1 # (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 from __future__ import (absolute_import, division, print_function)
18 __metaclass__ = type
19
20 from io import StringIO
21 import os
22 import re
23
24 try:
25 # python2
26 import ConfigParser as configparser
27 except ImportError:
28 # python3
29 import configparser
30
31 from ansible.errors import AnsibleError
32 from ansible.plugins.lookup import LookupBase
33 from ansible.module_utils._text import to_bytes, to_text
34
35
36 def _parse_params(term):
37 '''Safely split parameter term to preserve spaces'''
38
39 keys = ['key', 'section', 'file', 're']
40 params = {}
41 for k in keys:
42 params[k] = ''
43
44 thiskey = 'key'
45 for idp,phrase in enumerate(term.split()):
46 for k in keys:
47 if ('%s=' % k) in phrase:
48 thiskey = k
49 if idp == 0 or not params[thiskey]:
50 params[thiskey] = phrase
51 else:
52 params[thiskey] += ' ' + phrase
53
54 rparams = [params[x] for x in keys if params[x]]
55 return rparams
56
57
58 class LookupModule(LookupBase):
59
60 def read_properties(self, filename, key, dflt, is_regexp):
61 config = StringIO()
62 current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')
63
64 config.write(u'[java_properties]\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))
65 config.seek(0, os.SEEK_SET)
66 self.cp.readfp(config)
67 return self.get_value(key, 'java_properties', dflt, is_regexp)
68
69 def read_ini(self, filename, key, section, dflt, is_regexp):
70 self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))
71 return self.get_value(key, section, dflt, is_regexp)
72
73 def get_value(self, key, section, dflt, is_regexp):
74 # Retrieve all values from a section using a regexp
75 if is_regexp:
76 return [v for k, v in self.cp.items(section) if re.match(key, k)]
77 value = None
78 # Retrieve a single value
79 try:
80 value = self.cp.get(section, key)
81 except configparser.NoOptionError:
82 return dflt
83 return value
84
85 def run(self, terms, variables=None, **kwargs):
86
87 basedir = self.get_basedir(variables)
88 self.basedir = basedir
89 self.cp = configparser.ConfigParser()
90
91 ret = []
92 for term in terms:
93 params = _parse_params(term)
94 key = params[0]
95
96 paramvals = {
97 'file' : 'ansible.ini',
98 're' : False,
99 'default' : None,
100 'section' : "global",
101 'type' : "ini",
102 }
103
104 # parameters specified?
105 try:
106 for param in params[1:]:
107 name, value = param.split('=')
108 assert(name in paramvals)
109 paramvals[name] = value
110 except (ValueError, AssertionError) as e:
111 raise AnsibleError(e)
112
113 path = self.find_file_in_search_path(variables, 'files', paramvals['file'])
114 if paramvals['type'] == "properties":
115 var = self.read_properties(path, key, paramvals['default'], paramvals['re'])
116 else:
117 var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])
118 if var is not None:
119 if type(var) is list:
120 for v in var:
121 ret.append(v)
122 else:
123 ret.append(var)
124 return ret
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py
--- a/lib/ansible/plugins/lookup/ini.py
+++ b/lib/ansible/plugins/lookup/ini.py
@@ -36,7 +36,7 @@
def _parse_params(term):
'''Safely split parameter term to preserve spaces'''
- keys = ['key', 'section', 'file', 're']
+ keys = ['key', 'type', 'section', 'file', 're']
params = {}
for k in keys:
params[k] = ''
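The single added key is enough because `_parse_params` only treats a phrase as a new parameter when it contains one of the known `name=` markers; anything else is appended to the current parameter. With the old list, `type=properties` is therefore glued onto the lookup key and `paramvals['type']` keeps its default of `"ini"`, which routes a sectionless properties file through `read_ini`. Tracing the term from the report through the code above (derived by hand, not captured output):

```python
term = "some.java.property type=properties file=config.properties"

# keys without 'type' (before the patch):
#   ['some.java.property type=properties', 'file=config.properties']
#   -> paramvals['type'] stays "ini" -> read_ini() -> MissingSectionHeaderError

# keys with 'type' (after the patch):
#   ['some.java.property', 'type=properties', 'file=config.properties']
#   -> paramvals['type'] == "properties" -> read_properties()
```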
| {"golden_diff": "diff --git a/lib/ansible/plugins/lookup/ini.py b/lib/ansible/plugins/lookup/ini.py\n--- a/lib/ansible/plugins/lookup/ini.py\n+++ b/lib/ansible/plugins/lookup/ini.py\n@@ -36,7 +36,7 @@\n def _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n \n- keys = ['key', 'section', 'file', 're']\n+ keys = ['key', 'type', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n", "issue": "lookup properties\n<!--- Verify first that your issue/request is not already reported in GitHub -->\n##### ISSUE TYPE\n\n<!--- Pick one below and delete the rest: -->\n- Bug Report\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\nansible 2.2.0 (devel 4e369a31db) last updated 2016/07/02 15:01:01 (GMT +400)\n lib/ansible/modules/core: (detached HEAD 1d0d5db97a) last updated 2016/07/02 15:01:12 (GMT +400)\n lib/ansible/modules/extras: (detached HEAD 00b8b96906) last updated 2016/07/02 15:01:12 (GMT +400)\n config file = /etc/ansible/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n\n\"N/A\"\n##### SUMMARY\n\n[commit](https://github.com/ansible/ansible/commit/4ba60d00c8d7e62912a37ec24f90f6e5d0770c4d)\nthis commit breaks `lookup` for `type=properties`\n##### STEPS TO REPRODUCE\n\njust try to read some properties through `lookup`\n\n```\n- name: lookup\n vars:\n property_value: \"{{ lookup('ini', 'some.java.property type=properties file=config.properties') }}\"\n debug: msg=\"{{ property_value }}\"\n```\n##### EXPECTED RESULTS\n\nread correct value from property file\n##### ACTUAL RESULTS\n\n```\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py\", line 512, in _read\n raise MissingSectionHeaderError(fpname, lineno, line)\nMissingSectionHeaderError: File contains no section headers.\nfile: /config.properties, line: 3\n'environment=dev\\n'\n\nfatal: [localhost]: FAILED! => {\"failed\": true, \"msg\": \"Unexpected failure during module execution.\", \"stdout\": \"\"}\n\nNO MORE HOSTS LEFT *************************************************************\n to retry, use: --limit @test.retry\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=1 changed=0 unreachable=0 failed=1\n```\n\nBy the way, it would be great to implement 'autofill' properties\n\n```\nhost=127.0.0.1\napi.host=${host}\n```\n\ncc @jctanner \n\n", "before_files": [{"content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom io import StringIO\nimport os\nimport re\n\ntry:\n # python2\n import ConfigParser as configparser\nexcept ImportError:\n # python3\n import configparser\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.module_utils._text import to_bytes, to_text\n\n\ndef _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n\n keys = ['key', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n\n thiskey = 'key'\n for idp,phrase in enumerate(term.split()):\n for k in keys:\n if ('%s=' % k) in phrase:\n thiskey = k\n if idp == 0 or not params[thiskey]:\n params[thiskey] = phrase\n else:\n params[thiskey] += ' ' + phrase\n\n rparams = [params[x] for x in keys if params[x]]\n return rparams\n\n\nclass LookupModule(LookupBase):\n\n def read_properties(self, filename, key, dflt, is_regexp):\n config = StringIO()\n current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')\n\n config.write(u'[java_properties]\\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))\n config.seek(0, os.SEEK_SET)\n self.cp.readfp(config)\n return self.get_value(key, 'java_properties', dflt, is_regexp)\n\n def read_ini(self, filename, key, section, dflt, is_regexp):\n self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))\n return self.get_value(key, section, dflt, is_regexp)\n\n def get_value(self, key, section, dflt, is_regexp):\n # Retrieve all values from a section using a regexp\n if is_regexp:\n return [v for k, v in self.cp.items(section) if re.match(key, k)]\n value = None\n # Retrieve a single value\n try:\n value = self.cp.get(section, key)\n except configparser.NoOptionError:\n return dflt\n return value\n\n def run(self, terms, variables=None, **kwargs):\n\n basedir = self.get_basedir(variables)\n self.basedir = basedir\n self.cp = configparser.ConfigParser()\n\n ret = []\n for term in terms:\n params = _parse_params(term)\n key = params[0]\n\n paramvals = {\n 'file' : 'ansible.ini',\n 're' : False,\n 'default' : None,\n 'section' : \"global\",\n 'type' : \"ini\",\n }\n\n # parameters specified?\n try:\n for param in params[1:]:\n name, value = param.split('=')\n assert(name in paramvals)\n paramvals[name] = value\n except (ValueError, AssertionError) as e:\n raise AnsibleError(e)\n\n path = self.find_file_in_search_path(variables, 'files', paramvals['file'])\n if paramvals['type'] == \"properties\":\n var = self.read_properties(path, key, paramvals['default'], paramvals['re'])\n else:\n var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])\n if var is not None:\n if type(var) is list:\n for v in var:\n ret.append(v)\n else:\n ret.append(var)\n return ret\n", "path": "lib/ansible/plugins/lookup/ini.py"}], "after_files": [{"content": "# (c) 2015, Yannig Perre <yannig.perre(at)gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom io import StringIO\nimport os\nimport re\n\ntry:\n # python2\n import ConfigParser as configparser\nexcept ImportError:\n # python3\n import configparser\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.module_utils._text import to_bytes, to_text\n\n\ndef _parse_params(term):\n '''Safely split parameter term to preserve spaces'''\n\n keys = ['key', 'type', 'section', 'file', 're']\n params = {}\n for k in keys:\n params[k] = ''\n\n thiskey = 'key'\n for idp,phrase in enumerate(term.split()):\n for k in keys:\n if ('%s=' % k) in phrase:\n thiskey = k\n if idp == 0 or not params[thiskey]:\n params[thiskey] = phrase\n else:\n params[thiskey] += ' ' + phrase\n\n rparams = [params[x] for x in keys if params[x]]\n return rparams\n\n\nclass LookupModule(LookupBase):\n\n def read_properties(self, filename, key, dflt, is_regexp):\n config = StringIO()\n current_cfg_file = open(to_bytes(filename, errors='surrogate_or_strict'), 'rb')\n\n config.write(u'[java_properties]\\n' + to_text(current_cfg_file.read(), errors='surrogate_or_strict'))\n config.seek(0, os.SEEK_SET)\n self.cp.readfp(config)\n return self.get_value(key, 'java_properties', dflt, is_regexp)\n\n def read_ini(self, filename, key, section, dflt, is_regexp):\n self.cp.readfp(open(to_bytes(filename, errors='surrogate_or_strict')))\n return self.get_value(key, section, dflt, is_regexp)\n\n def get_value(self, key, section, dflt, is_regexp):\n # Retrieve all values from a section using a regexp\n if is_regexp:\n return [v for k, v in self.cp.items(section) if re.match(key, k)]\n value = None\n # Retrieve a single value\n try:\n value = self.cp.get(section, key)\n except configparser.NoOptionError:\n return dflt\n return value\n\n def run(self, terms, variables=None, **kwargs):\n\n basedir = self.get_basedir(variables)\n self.basedir = basedir\n self.cp = configparser.ConfigParser()\n\n ret = []\n for term in terms:\n params = _parse_params(term)\n key = params[0]\n\n paramvals = {\n 'file' : 'ansible.ini',\n 're' : False,\n 'default' : None,\n 'section' : \"global\",\n 'type' : \"ini\",\n }\n\n # parameters specified?\n try:\n for param in params[1:]:\n name, value = param.split('=')\n assert(name in paramvals)\n paramvals[name] = value\n except (ValueError, AssertionError) as e:\n raise AnsibleError(e)\n\n path = self.find_file_in_search_path(variables, 'files', paramvals['file'])\n if paramvals['type'] == \"properties\":\n var = self.read_properties(path, key, paramvals['default'], paramvals['re'])\n else:\n var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re'])\n if var is not None:\n if type(var) is list:\n for v in var:\n ret.append(v)\n else:\n ret.append(var)\n return ret\n", "path": "lib/ansible/plugins/lookup/ini.py"}]} | 2,114 | 130 |
gh_patches_debug_25790 | rasdani/github-patches | git_diff | pre-commit__pre-commit-244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unstaged check should not complain when running --all-files
```
$ pre-commit run --all-files
[ERROR] Your .pre-commit-config.yaml is unstaged.
`git add .pre-commit-config.yaml` to fix this.
Run pre-commit with --allow-unstaged-config to silence this.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import logging
5 import os
6 import sys
7
8 from pre_commit import color
9 from pre_commit import git
10 from pre_commit.logging_handler import LoggingHandler
11 from pre_commit.output import get_hook_message
12 from pre_commit.output import sys_stdout_write_wrapper
13 from pre_commit.staged_files_only import staged_files_only
14 from pre_commit.util import cmd_output
15 from pre_commit.util import noop_context
16
17
18 logger = logging.getLogger('pre_commit')
19
20
21 def _get_skips(environ):
22 skips = environ.get('SKIP', '')
23 return set(skip.strip() for skip in skips.split(',') if skip.strip())
24
25
26 def _hook_msg_start(hook, verbose):
27 return '{0}{1}'.format(
28 '[{0}] '.format(hook['id']) if verbose else '',
29 hook['name'],
30 )
31
32
33 def _print_no_files_skipped(hook, write, args):
34 write(get_hook_message(
35 _hook_msg_start(hook, args.verbose),
36 postfix='(no files to check) ',
37 end_msg='Skipped',
38 end_color=color.TURQUOISE,
39 use_color=args.color,
40 ))
41
42
43 def _print_user_skipped(hook, write, args):
44 write(get_hook_message(
45 _hook_msg_start(hook, args.verbose),
46 end_msg='Skipped',
47 end_color=color.YELLOW,
48 use_color=args.color,
49 ))
50
51
52 def get_changed_files(new, old):
53 return cmd_output(
54 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),
55 )[1].splitlines()
56
57
58 def get_filenames(args, include_expr, exclude_expr):
59 if args.origin and args.source:
60 getter = git.get_files_matching(
61 lambda: get_changed_files(args.origin, args.source),
62 )
63 elif args.files:
64 getter = git.get_files_matching(lambda: args.files)
65 elif args.all_files:
66 getter = git.get_all_files_matching
67 elif git.is_in_merge_conflict():
68 getter = git.get_conflicted_files_matching
69 else:
70 getter = git.get_staged_files_matching
71 return getter(include_expr, exclude_expr)
72
73
74 def _run_single_hook(hook, repo, args, write, skips=frozenset()):
75 filenames = get_filenames(args, hook['files'], hook['exclude'])
76 if hook['id'] in skips:
77 _print_user_skipped(hook, write, args)
78 return 0
79 elif not filenames:
80 _print_no_files_skipped(hook, write, args)
81 return 0
82
83 # Print the hook and the dots first in case the hook takes hella long to
84 # run.
85 write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
86 sys.stdout.flush()
87
88 retcode, stdout, stderr = repo.run_hook(hook, filenames)
89
90 if retcode != hook['expected_return_value']:
91 retcode = 1
92 print_color = color.RED
93 pass_fail = 'Failed'
94 else:
95 retcode = 0
96 print_color = color.GREEN
97 pass_fail = 'Passed'
98
99 write(color.format_color(pass_fail, print_color, args.color) + '\n')
100
101 if (stdout or stderr) and (retcode or args.verbose):
102 write('hookid: {0}\n'.format(hook['id']))
103 write('\n')
104 for output in (stdout, stderr):
105 if output.strip():
106 write(output.strip() + '\n')
107 write('\n')
108
109 return retcode
110
111
112 def _run_hooks(repo_hooks, args, write, environ):
113 """Actually run the hooks."""
114 skips = _get_skips(environ)
115 retval = 0
116 for repo, hook in repo_hooks:
117 retval |= _run_single_hook(hook, repo, args, write, skips)
118 return retval
119
120
121 def get_repo_hooks(runner):
122 for repo in runner.repositories:
123 for _, hook in repo.hooks:
124 yield (repo, hook)
125
126
127 def _has_unmerged_paths(runner):
128 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
129 return bool(stdout.strip())
130
131
132 def _has_unstaged_config(runner):
133 retcode, _, _ = runner.cmd_runner.run(
134 ('git', 'diff', '--exit-code', runner.config_file_path),
135 retcode=None,
136 )
137 # be explicit, other git errors don't mean it has an unstaged config.
138 return retcode == 1
139
140
141 def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
142 # Set up our logging handler
143 logger.addHandler(LoggingHandler(args.color, write=write))
144 logger.setLevel(logging.INFO)
145
146 # Check if we have unresolved merge conflict files and fail fast.
147 if _has_unmerged_paths(runner):
148 logger.error('Unmerged files. Resolve before committing.')
149 return 1
150 if bool(args.source) != bool(args.origin):
151 logger.error('Specify both --origin and --source.')
152 return 1
153 if _has_unstaged_config(runner) and not args.no_stash:
154 if args.allow_unstaged_config:
155 logger.warn(
156 'You have an unstaged config file and have specified the '
157 '--allow-unstaged-config option.\n'
158 'Note that your config will be stashed before the config is '
159 'parsed unless --no-stash is specified.',
160 )
161 else:
162 logger.error(
163 'Your .pre-commit-config.yaml is unstaged.\n'
164 '`git add .pre-commit-config.yaml` to fix this.\n'
165 'Run pre-commit with --allow-unstaged-config to silence this.'
166 )
167 return 1
168
169 # Don't stash if specified or files are specified
170 if args.no_stash or args.all_files or args.files:
171 ctx = noop_context()
172 else:
173 ctx = staged_files_only(runner.cmd_runner)
174
175 with ctx:
176 repo_hooks = list(get_repo_hooks(runner))
177 if args.hook:
178 repo_hooks = [
179 (repo, hook) for repo, hook in repo_hooks
180 if hook['id'] == args.hook
181 ]
182 if not repo_hooks:
183 write('No hook with id `{0}`\n'.format(args.hook))
184 return 1
185 return _run_hooks(repo_hooks, args, write, environ)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -139,6 +139,7 @@
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
+ no_stash = args.no_stash or args.all_files or bool(args.files)
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
@@ -150,7 +151,7 @@
if bool(args.source) != bool(args.origin):
logger.error('Specify both --origin and --source.')
return 1
- if _has_unstaged_config(runner) and not args.no_stash:
+ if _has_unstaged_config(runner) and not no_stash:
if args.allow_unstaged_config:
logger.warn(
'You have an unstaged config file and have specified the '
@@ -166,8 +167,7 @@
)
return 1
- # Don't stash if specified or files are specified
- if args.no_stash or args.all_files or args.files:
+ if no_stash:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
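The patch boils down to deciding up front whether anything will be stashed: with `--all-files`, explicit files, or `--no-stash`, the config is never stashed, so an unstaged `.pre-commit-config.yaml` cannot cause the surprise the check guards against, and the error path is skipped. A condensed paraphrase of the resulting flow (not a verbatim excerpt):

```python
no_stash = args.no_stash or args.all_files or bool(args.files)

if _has_unstaged_config(runner) and not no_stash:
    ...  # warn with --allow-unstaged-config, otherwise error out as before

ctx = noop_context() if no_stash else staged_files_only(runner.cmd_runner)
```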
| {"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -139,6 +139,7 @@\n \n \n def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n+ no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n@@ -150,7 +151,7 @@\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n- if _has_unstaged_config(runner) and not args.no_stash:\n+ if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n@@ -166,8 +167,7 @@\n )\n return 1\n \n- # Don't stash if specified or files are specified\n- if args.no_stash or args.all_files or args.files:\n+ if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n", "issue": "Unstaged check should not complain when running --all-files\n```\n$ pre-commit run --all-files\n[ERROR] Your .pre-commit-config.yaml is unstaged.\n`git add .pre-commit-config.yaml` to fix this.\nRun pre-commit with --allow-unstaged-config to silence this.\n```\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook 
takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode != hook['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not args.no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n # Don't stash if specified or files are specified\n if args.no_stash or args.all_files or args.files:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n 
sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode != hook['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n if output.strip():\n write(output.strip() + '\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}]} | 2,160 | 296 |
gh_patches_debug_3949 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-3999 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
String interpolation with no params in utils.db
https://github.com/RedHatQE/cfme_tests/blob/master/utils/db.py#L297
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/db.py`
Content:
```
1 from collections import Mapping
2 from contextlib import contextmanager
3 from itertools import izip
4
5 from cached_property import cached_property
6 from sqlalchemy import MetaData, create_engine, event, inspect
7 from sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError
8 from sqlalchemy.ext.declarative import declarative_base
9 from sqlalchemy.orm import sessionmaker
10 from sqlalchemy.pool import Pool
11
12 from fixtures.pytest_store import store
13 from utils import conf, ports, version
14 from utils.log import logger
15
16
17 @event.listens_for(Pool, "checkout")
18 def ping_connection(dbapi_connection, connection_record, connection_proxy):
19 """ping_connection event hook, used to reconnect db sessions that time out
20
21 Note:
22
23 See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`
24
25 """
26 cursor = dbapi_connection.cursor()
27 try:
28 cursor.execute("SELECT 1")
29 except StandardError:
30 raise DisconnectionError
31 cursor.close()
32
33
34 def scl_name():
35 # postgres's version is in the service name and file paths when we pull it from SCL,
36 # so this is a little resolver to help keep the version picking centralized
37 return version.pick({
38 version.LOWEST: 'postgresql92',
39 '5.5': 'rh-postgresql94',
40 '5.7': 'rh-postgresql95'
41 })
42
43
44 class Db(Mapping):
45 """Helper class for interacting with a CFME database using SQLAlchemy
46
47 Args:
48 hostname: base url to be used (default is from current_appliance)
49 credentials: name of credentials to use from :py:attr:`utils.conf.credentials`
50 (default ``database``)
51
52 Provides convient attributes to common sqlalchemy objects related to this DB,
53 as well as a Mapping interface to access and reflect database tables. Where possible,
54 attributes are cached.
55
56 Db objects support getting tables by name via the mapping interface::
57
58 table = db['table_name']
59
60 Usage:
61
62 # Usually used to query the DB for info, here's a common query
63 for vm in db.session.query(db['vms']).all():
64 print(vm.name)
65 print(vm.guid)
66
67 # List comprehension to get all templates
68 [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]
69
70 # Use the transaction manager for write operations:
71 with db.transaction:
72 db.session.query(db['vms']).all().delete()
73
74 Note:
75
76 Creating a table object requires a call to the database so that SQLAlchemy can do
77 reflection to determine the table's structure (columns, keys, indices, etc). On
78 a latent connection, this can be extremely slow, which will affect methods that return
79 tables, like the mapping interface or :py:meth:`values`.
80
81 """
82 def __init__(self, hostname=None, credentials=None):
83 self._table_cache = {}
84 if hostname is None:
85 self.hostname = store.current_appliance.db_address
86 else:
87 self.hostname = hostname
88
89 self.credentials = credentials or conf.credentials['database']
90
91 def __getitem__(self, table_name):
92 """Access tables as items contained in this db
93
94 Usage:
95
96 # To get a table called 'table_name':
97 db['table_name']
98
99 This may return ``None`` in the case where a table is found but reflection fails.
100
101 """
102 try:
103 return self._table(table_name)
104 except InvalidRequestError:
105 raise KeyError('Table {} could not be found'.format(table_name))
106
107 def __iter__(self):
108 """Iterator of table names in this db"""
109 return self.keys()
110
111 def __len__(self):
112 """Number of tables in this db"""
113 return len(self.table_names)
114
115 def __contains__(self, table_name):
116 """Whether or not the named table is in this db"""
117 return table_name in self.table_names
118
119 def keys(self):
120 """Iterator of table names in this db"""
121 return (table_name for table_name in self.table_names)
122
123 def items(self):
124 """Iterator of ``(table_name, table)`` pairs"""
125 return izip(self.keys(), self.values())
126
127 def values(self):
128 """Iterator of tables in this db"""
129 return (self[table_name] for table_name in self.table_names)
130
131 def get(self, table_name, default=None):
132 """table getter
133
134 Args:
135 table_name: Name of the table to get
136 default: Default value to return if ``table_name`` is not found.
137
138 Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default
139
140 """
141 try:
142 return self[table_name]
143 except KeyError:
144 return default
145
146 def copy(self):
147 """Copy this database instance, keeping the same credentials and hostname"""
148 return type(self)(self.hostname, self.credentials)
149
150 def __eq__(self, other):
151 """Check if this db is equal to another db"""
152 try:
153 return self.hostname == other.hostname
154 except:
155 return False
156
157 def __ne__(self, other):
158 """Check if this db is not equal to another db"""
159 return not self == other
160
161 @cached_property
162 def engine(self):
163 """The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database
164
165 It uses pessimistic disconnection handling, checking that the database is still
166 connected before executing commands.
167
168 """
169 return create_engine(self.db_url, echo_pool=True)
170
171 @cached_property
172 def sessionmaker(self):
173 """A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`
174
175 Used to make new sessions with this database, as needed.
176
177 """
178 return sessionmaker(bind=self.engine)
179
180 @cached_property
181 def table_base(self):
182 """Base class for all tables returned by this database
183
184 This base class is created using
185 :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.
186 """
187 return declarative_base(metadata=self.metadata)
188
189 @cached_property
190 def metadata(self):
191 """:py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database
192
193 This can be used for introspection of reflected items.
194
195 Note:
196
197 Tables that haven't been reflected won't show up in metadata. To reflect a table,
198 use :py:meth:`reflect_table`.
199
200 """
201 return MetaData(bind=self.engine)
202
203 @cached_property
204 def db_url(self):
205 """The connection URL for this database, including credentials"""
206 template = "postgresql://{username}:{password}@{host}:{port}/vmdb_production"
207 result = template.format(host=self.hostname, port=ports.DB, **self.credentials)
208 logger.info("[DB] db_url is %s", result)
209 return result
210
211 @cached_property
212 def table_names(self):
213 """A sorted list of table names available in this database."""
214 # rails table names follow similar rules as pep8 identifiers; expose them as such
215 return sorted(inspect(self.engine).get_table_names())
216
217 @cached_property
218 def session(self):
219 """Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`
220
221 This is used for database queries. For writing to the database, start a
222 :py:meth:`transaction`.
223
224 Note:
225
226 This attribute is cached. In cases where a new session needs to be explicitly created,
227 use :py:meth:`sessionmaker`.
228
229 """
230 return self.sessionmaker(autocommit=True)
231
232 @property
233 @contextmanager
234 def transaction(self):
235 """Context manager for simple transaction management
236
237 Sessions understand the concept of transactions, and provider context managers to
238 handle conditionally committing or rolling back transactions as needed.
239
240 Note:
241
242 Sessions automatically commit transactions by default. For predictable results when
243 writing to the database, use the transaction manager.
244
245 Usage:
246
247 with db.transaction:
248 db.session.do_something()
249
250 """
251 with self.session.begin():
252 yield
253
254 def reflect_table(self, table_name):
255 """Populate :py:attr:`metadata` with information on a table
256
257 Args:
258 table_name: The name of a table to reflect
259
260 """
261 self.metadata.reflect(only=[table_name])
262
263 def _table(self, table_name):
264 """Retrieves, reflects, and caches table objects
265
266 Actual implementation of __getitem__
267 """
268 try:
269 return self._table_cache[table_name]
270 except KeyError:
271 self.reflect_table(table_name)
272 table = self.metadata.tables[table_name]
273 table_dict = {
274 '__table__': table,
275 '__tablename__': table_name
276 }
277
278 try:
279 table_cls = type(str(table_name), (self.table_base,), table_dict)
280 self._table_cache[table_name] = table_cls
281 return table_cls
282 except ArgumentError:
283 # This usually happens on join tables with no PKs
284 logger.info('Unable to create table class for table "%s"')
285 return None
286
287
288 @contextmanager
289 def database_on_server(hostname, **kwargs):
290 db_obj = Db(hostname=hostname, **kwargs)
291 yield db_obj
292
293
294 #: :py:class:`Db` instance configured with default settings from conf yamls
295 def cfmedb():
296 return store.current_appliance.db
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/db.py b/utils/db.py
--- a/utils/db.py
+++ b/utils/db.py
@@ -281,7 +281,7 @@
return table_cls
except ArgumentError:
# This usually happens on join tables with no PKs
- logger.info('Unable to create table class for table "%s"')
+ logger.info('Unable to create table class for table "{}"'.format(table_name))
return None
| {"golden_diff": "diff --git a/utils/db.py b/utils/db.py\n--- a/utils/db.py\n+++ b/utils/db.py\n@@ -281,7 +281,7 @@\n return table_cls\n except ArgumentError:\n # This usually happens on join tables with no PKs\n- logger.info('Unable to create table class for table \"%s\"')\n+ logger.info('Unable to create table class for table \"{}\"'.format(table_name))\n return None\n", "issue": "String interpolation with no params in utils.db\nhttps://github.com/RedHatQE/cfme_tests/blob/master/utils/db.py#L297\n\n", "before_files": [{"content": "from collections import Mapping\nfrom contextlib import contextmanager\nfrom itertools import izip\n\nfrom cached_property import cached_property\nfrom sqlalchemy import MetaData, create_engine, event, inspect\nfrom sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import Pool\n\nfrom fixtures.pytest_store import store\nfrom utils import conf, ports, version\nfrom utils.log import logger\n\n\[email protected]_for(Pool, \"checkout\")\ndef ping_connection(dbapi_connection, connection_record, connection_proxy):\n \"\"\"ping_connection event hook, used to reconnect db sessions that time out\n\n Note:\n\n See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`\n\n \"\"\"\n cursor = dbapi_connection.cursor()\n try:\n cursor.execute(\"SELECT 1\")\n except StandardError:\n raise DisconnectionError\n cursor.close()\n\n\ndef scl_name():\n # postgres's version is in the service name and file paths when we pull it from SCL,\n # so this is a little resolver to help keep the version picking centralized\n return version.pick({\n version.LOWEST: 'postgresql92',\n '5.5': 'rh-postgresql94',\n '5.7': 'rh-postgresql95'\n })\n\n\nclass Db(Mapping):\n \"\"\"Helper class for interacting with a CFME database using SQLAlchemy\n\n Args:\n hostname: base url to be used (default is from current_appliance)\n credentials: name of credentials to use from :py:attr:`utils.conf.credentials`\n (default ``database``)\n\n Provides convient attributes to common sqlalchemy objects related to this DB,\n as well as a Mapping interface to access and reflect database tables. Where possible,\n attributes are cached.\n\n Db objects support getting tables by name via the mapping interface::\n\n table = db['table_name']\n\n Usage:\n\n # Usually used to query the DB for info, here's a common query\n for vm in db.session.query(db['vms']).all():\n print(vm.name)\n print(vm.guid)\n\n # List comprehension to get all templates\n [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]\n\n # Use the transaction manager for write operations:\n with db.transaction:\n db.session.query(db['vms']).all().delete()\n\n Note:\n\n Creating a table object requires a call to the database so that SQLAlchemy can do\n reflection to determine the table's structure (columns, keys, indices, etc). 
On\n a latent connection, this can be extremely slow, which will affect methods that return\n tables, like the mapping interface or :py:meth:`values`.\n\n \"\"\"\n def __init__(self, hostname=None, credentials=None):\n self._table_cache = {}\n if hostname is None:\n self.hostname = store.current_appliance.db_address\n else:\n self.hostname = hostname\n\n self.credentials = credentials or conf.credentials['database']\n\n def __getitem__(self, table_name):\n \"\"\"Access tables as items contained in this db\n\n Usage:\n\n # To get a table called 'table_name':\n db['table_name']\n\n This may return ``None`` in the case where a table is found but reflection fails.\n\n \"\"\"\n try:\n return self._table(table_name)\n except InvalidRequestError:\n raise KeyError('Table {} could not be found'.format(table_name))\n\n def __iter__(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return self.keys()\n\n def __len__(self):\n \"\"\"Number of tables in this db\"\"\"\n return len(self.table_names)\n\n def __contains__(self, table_name):\n \"\"\"Whether or not the named table is in this db\"\"\"\n return table_name in self.table_names\n\n def keys(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return (table_name for table_name in self.table_names)\n\n def items(self):\n \"\"\"Iterator of ``(table_name, table)`` pairs\"\"\"\n return izip(self.keys(), self.values())\n\n def values(self):\n \"\"\"Iterator of tables in this db\"\"\"\n return (self[table_name] for table_name in self.table_names)\n\n def get(self, table_name, default=None):\n \"\"\"table getter\n\n Args:\n table_name: Name of the table to get\n default: Default value to return if ``table_name`` is not found.\n\n Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default\n\n \"\"\"\n try:\n return self[table_name]\n except KeyError:\n return default\n\n def copy(self):\n \"\"\"Copy this database instance, keeping the same credentials and hostname\"\"\"\n return type(self)(self.hostname, self.credentials)\n\n def __eq__(self, other):\n \"\"\"Check if this db is equal to another db\"\"\"\n try:\n return self.hostname == other.hostname\n except:\n return False\n\n def __ne__(self, other):\n \"\"\"Check if this db is not equal to another db\"\"\"\n return not self == other\n\n @cached_property\n def engine(self):\n \"\"\"The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database\n\n It uses pessimistic disconnection handling, checking that the database is still\n connected before executing commands.\n\n \"\"\"\n return create_engine(self.db_url, echo_pool=True)\n\n @cached_property\n def sessionmaker(self):\n \"\"\"A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`\n\n Used to make new sessions with this database, as needed.\n\n \"\"\"\n return sessionmaker(bind=self.engine)\n\n @cached_property\n def table_base(self):\n \"\"\"Base class for all tables returned by this database\n\n This base class is created using\n :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.\n \"\"\"\n return declarative_base(metadata=self.metadata)\n\n @cached_property\n def metadata(self):\n \"\"\":py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database\n\n This can be used for introspection of reflected items.\n\n Note:\n\n Tables that haven't been reflected won't show up in metadata. 
To reflect a table,\n use :py:meth:`reflect_table`.\n\n \"\"\"\n return MetaData(bind=self.engine)\n\n @cached_property\n def db_url(self):\n \"\"\"The connection URL for this database, including credentials\"\"\"\n template = \"postgresql://{username}:{password}@{host}:{port}/vmdb_production\"\n result = template.format(host=self.hostname, port=ports.DB, **self.credentials)\n logger.info(\"[DB] db_url is %s\", result)\n return result\n\n @cached_property\n def table_names(self):\n \"\"\"A sorted list of table names available in this database.\"\"\"\n # rails table names follow similar rules as pep8 identifiers; expose them as such\n return sorted(inspect(self.engine).get_table_names())\n\n @cached_property\n def session(self):\n \"\"\"Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`\n\n This is used for database queries. For writing to the database, start a\n :py:meth:`transaction`.\n\n Note:\n\n This attribute is cached. In cases where a new session needs to be explicitly created,\n use :py:meth:`sessionmaker`.\n\n \"\"\"\n return self.sessionmaker(autocommit=True)\n\n @property\n @contextmanager\n def transaction(self):\n \"\"\"Context manager for simple transaction management\n\n Sessions understand the concept of transactions, and provider context managers to\n handle conditionally committing or rolling back transactions as needed.\n\n Note:\n\n Sessions automatically commit transactions by default. For predictable results when\n writing to the database, use the transaction manager.\n\n Usage:\n\n with db.transaction:\n db.session.do_something()\n\n \"\"\"\n with self.session.begin():\n yield\n\n def reflect_table(self, table_name):\n \"\"\"Populate :py:attr:`metadata` with information on a table\n\n Args:\n table_name: The name of a table to reflect\n\n \"\"\"\n self.metadata.reflect(only=[table_name])\n\n def _table(self, table_name):\n \"\"\"Retrieves, reflects, and caches table objects\n\n Actual implementation of __getitem__\n \"\"\"\n try:\n return self._table_cache[table_name]\n except KeyError:\n self.reflect_table(table_name)\n table = self.metadata.tables[table_name]\n table_dict = {\n '__table__': table,\n '__tablename__': table_name\n }\n\n try:\n table_cls = type(str(table_name), (self.table_base,), table_dict)\n self._table_cache[table_name] = table_cls\n return table_cls\n except ArgumentError:\n # This usually happens on join tables with no PKs\n logger.info('Unable to create table class for table \"%s\"')\n return None\n\n\n@contextmanager\ndef database_on_server(hostname, **kwargs):\n db_obj = Db(hostname=hostname, **kwargs)\n yield db_obj\n\n\n#: :py:class:`Db` instance configured with default settings from conf yamls\ndef cfmedb():\n return store.current_appliance.db\n", "path": "utils/db.py"}], "after_files": [{"content": "from collections import Mapping\nfrom contextlib import contextmanager\nfrom itertools import izip\n\nfrom cached_property import cached_property\nfrom sqlalchemy import MetaData, create_engine, event, inspect\nfrom sqlalchemy.exc import ArgumentError, DisconnectionError, InvalidRequestError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import Pool\n\nfrom fixtures.pytest_store import store\nfrom utils import conf, ports, version\nfrom utils.log import logger\n\n\[email protected]_for(Pool, \"checkout\")\ndef ping_connection(dbapi_connection, connection_record, connection_proxy):\n \"\"\"ping_connection event hook, used to reconnect db sessions 
that time out\n\n Note:\n\n See also: :ref:`Connection Invalidation <sqlalchemy:pool_connection_invalidation>`\n\n \"\"\"\n cursor = dbapi_connection.cursor()\n try:\n cursor.execute(\"SELECT 1\")\n except StandardError:\n raise DisconnectionError\n cursor.close()\n\n\ndef scl_name():\n # postgres's version is in the service name and file paths when we pull it from SCL,\n # so this is a little resolver to help keep the version picking centralized\n return version.pick({\n version.LOWEST: 'postgresql92',\n '5.5': 'rh-postgresql94',\n '5.7': 'rh-postgresql95'\n })\n\n\nclass Db(Mapping):\n \"\"\"Helper class for interacting with a CFME database using SQLAlchemy\n\n Args:\n hostname: base url to be used (default is from current_appliance)\n credentials: name of credentials to use from :py:attr:`utils.conf.credentials`\n (default ``database``)\n\n Provides convient attributes to common sqlalchemy objects related to this DB,\n as well as a Mapping interface to access and reflect database tables. Where possible,\n attributes are cached.\n\n Db objects support getting tables by name via the mapping interface::\n\n table = db['table_name']\n\n Usage:\n\n # Usually used to query the DB for info, here's a common query\n for vm in db.session.query(db['vms']).all():\n print(vm.name)\n print(vm.guid)\n\n # List comprehension to get all templates\n [(vm.name, vm.guid) for vm in session.query(db['vms']).all() if vm.template is True]\n\n # Use the transaction manager for write operations:\n with db.transaction:\n db.session.query(db['vms']).all().delete()\n\n Note:\n\n Creating a table object requires a call to the database so that SQLAlchemy can do\n reflection to determine the table's structure (columns, keys, indices, etc). On\n a latent connection, this can be extremely slow, which will affect methods that return\n tables, like the mapping interface or :py:meth:`values`.\n\n \"\"\"\n def __init__(self, hostname=None, credentials=None):\n self._table_cache = {}\n if hostname is None:\n self.hostname = store.current_appliance.db_address\n else:\n self.hostname = hostname\n\n self.credentials = credentials or conf.credentials['database']\n\n def __getitem__(self, table_name):\n \"\"\"Access tables as items contained in this db\n\n Usage:\n\n # To get a table called 'table_name':\n db['table_name']\n\n This may return ``None`` in the case where a table is found but reflection fails.\n\n \"\"\"\n try:\n return self._table(table_name)\n except InvalidRequestError:\n raise KeyError('Table {} could not be found'.format(table_name))\n\n def __iter__(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return self.keys()\n\n def __len__(self):\n \"\"\"Number of tables in this db\"\"\"\n return len(self.table_names)\n\n def __contains__(self, table_name):\n \"\"\"Whether or not the named table is in this db\"\"\"\n return table_name in self.table_names\n\n def keys(self):\n \"\"\"Iterator of table names in this db\"\"\"\n return (table_name for table_name in self.table_names)\n\n def items(self):\n \"\"\"Iterator of ``(table_name, table)`` pairs\"\"\"\n return izip(self.keys(), self.values())\n\n def values(self):\n \"\"\"Iterator of tables in this db\"\"\"\n return (self[table_name] for table_name in self.table_names)\n\n def get(self, table_name, default=None):\n \"\"\"table getter\n\n Args:\n table_name: Name of the table to get\n default: Default value to return if ``table_name`` is not found.\n\n Returns: a table if ``table_name`` exists, otherwise 'None' or the passed-in default\n\n \"\"\"\n try:\n 
return self[table_name]\n except KeyError:\n return default\n\n def copy(self):\n \"\"\"Copy this database instance, keeping the same credentials and hostname\"\"\"\n return type(self)(self.hostname, self.credentials)\n\n def __eq__(self, other):\n \"\"\"Check if this db is equal to another db\"\"\"\n try:\n return self.hostname == other.hostname\n except:\n return False\n\n def __ne__(self, other):\n \"\"\"Check if this db is not equal to another db\"\"\"\n return not self == other\n\n @cached_property\n def engine(self):\n \"\"\"The :py:class:`Engine <sqlalchemy:sqlalchemy.engine.Engine>` for this database\n\n It uses pessimistic disconnection handling, checking that the database is still\n connected before executing commands.\n\n \"\"\"\n return create_engine(self.db_url, echo_pool=True)\n\n @cached_property\n def sessionmaker(self):\n \"\"\"A :py:class:`sessionmaker <sqlalchemy:sqlalchemy.orm.session.sessionmaker>`\n\n Used to make new sessions with this database, as needed.\n\n \"\"\"\n return sessionmaker(bind=self.engine)\n\n @cached_property\n def table_base(self):\n \"\"\"Base class for all tables returned by this database\n\n This base class is created using\n :py:class:`declarative_base <sqlalchemy:sqlalchemy.ext.declarative.declarative_base>`.\n \"\"\"\n return declarative_base(metadata=self.metadata)\n\n @cached_property\n def metadata(self):\n \"\"\":py:class:`MetaData <sqlalchemy:sqlalchemy.schema.MetaData>` for this database\n\n This can be used for introspection of reflected items.\n\n Note:\n\n Tables that haven't been reflected won't show up in metadata. To reflect a table,\n use :py:meth:`reflect_table`.\n\n \"\"\"\n return MetaData(bind=self.engine)\n\n @cached_property\n def db_url(self):\n \"\"\"The connection URL for this database, including credentials\"\"\"\n template = \"postgresql://{username}:{password}@{host}:{port}/vmdb_production\"\n result = template.format(host=self.hostname, port=ports.DB, **self.credentials)\n logger.info(\"[DB] db_url is %s\", result)\n return result\n\n @cached_property\n def table_names(self):\n \"\"\"A sorted list of table names available in this database.\"\"\"\n # rails table names follow similar rules as pep8 identifiers; expose them as such\n return sorted(inspect(self.engine).get_table_names())\n\n @cached_property\n def session(self):\n \"\"\"Returns a :py:class:`Session <sqlalchemy:sqlalchemy.orm.session.Session>`\n\n This is used for database queries. For writing to the database, start a\n :py:meth:`transaction`.\n\n Note:\n\n This attribute is cached. In cases where a new session needs to be explicitly created,\n use :py:meth:`sessionmaker`.\n\n \"\"\"\n return self.sessionmaker(autocommit=True)\n\n @property\n @contextmanager\n def transaction(self):\n \"\"\"Context manager for simple transaction management\n\n Sessions understand the concept of transactions, and provider context managers to\n handle conditionally committing or rolling back transactions as needed.\n\n Note:\n\n Sessions automatically commit transactions by default. 
For predictable results when\n writing to the database, use the transaction manager.\n\n Usage:\n\n with db.transaction:\n db.session.do_something()\n\n \"\"\"\n with self.session.begin():\n yield\n\n def reflect_table(self, table_name):\n \"\"\"Populate :py:attr:`metadata` with information on a table\n\n Args:\n table_name: The name of a table to reflect\n\n \"\"\"\n self.metadata.reflect(only=[table_name])\n\n def _table(self, table_name):\n \"\"\"Retrieves, reflects, and caches table objects\n\n Actual implementation of __getitem__\n \"\"\"\n try:\n return self._table_cache[table_name]\n except KeyError:\n self.reflect_table(table_name)\n table = self.metadata.tables[table_name]\n table_dict = {\n '__table__': table,\n '__tablename__': table_name\n }\n\n try:\n table_cls = type(str(table_name), (self.table_base,), table_dict)\n self._table_cache[table_name] = table_cls\n return table_cls\n except ArgumentError:\n # This usually happens on join tables with no PKs\n logger.info('Unable to create table class for table \"{}\"'.format(table_name))\n return None\n\n\n@contextmanager\ndef database_on_server(hostname, **kwargs):\n db_obj = Db(hostname=hostname, **kwargs)\n yield db_obj\n\n\n#: :py:class:`Db` instance configured with default settings from conf yamls\ndef cfmedb():\n return store.current_appliance.db\n", "path": "utils/db.py"}]} | 3,107 | 99 |
gh_patches_debug_6107 | rasdani/github-patches | git_diff | pytorch__vision-8227 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`CocoDetection` accepts `slice` as an argument to its `__getitem__` and returns a wrong result
Hi, although `CocoDetection` declares its `__getitem__` argument to be of type `int`, it actually accepts a `slice` as well. However, it returns a wrong result. As this might be confusing for some users, I suggest adding a type check at the beginning of `__getitem__`.
https://github.com/pytorch/vision/blob/cab01fc1b7c7f0620ab95c6064f2a3fb583e9bee/torchvision/datasets/coco.py#L46-L49
```python
ds[:2]
```
```
(tensor([[[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],
...,
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],
[0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235]],
[[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],
...,
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],
[0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353]],
[[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],
...,
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],
[0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020]]]),
{'category_id': tensor([2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,
1]),
'bbox': tensor([[ 390, 240, 58, 92],
[ 391, 319, 66, 99],
[ 663, 331, 54, 106],
[ 796, 324, 50, 102],
[1245, 276, 35, 117],
[ 599, 265, 58, 91],
[ 755, 228, 42, 91],
[ 788, 261, 61, 97],
[ 250, 374, 73, 105],
[ 224, 449, 29, 28],
[ 12, 85, 13, 32],
[ 88, 70, 13, 27],
[ 102, 99, 14, 33],
[ 100, 132, 14, 32],
[ 151, 160, 13, 34],
[ 177, 115, 18, 32],
[ 214, 73, 12, 28],
[ 267, 58, 13, 23],
[ 358, 71, 14, 33],
[ 356, 160, 17, 36],
[ 231, 132, 14, 33],
[ 234, 88, 16, 29],
[ 248, 90, 13, 25],
[ 60, 109, 13, 32],
[ 243, 75, 7, 5]]),
'iscrowd': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0])})
```
The image in the returned tuple is that of `ds[0]`, and the target is the combined annotations of `ds[0]` and `ds[1]`, which is wrong. This is due to the fact that `ds.coco` can work with a list of inputs in its `getAnnIds()`.
cc @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/coco.py`
Content:
```
1 import os.path
2 from typing import Any, Callable, List, Optional, Tuple
3
4 from PIL import Image
5
6 from .vision import VisionDataset
7
8
9 class CocoDetection(VisionDataset):
10 """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
11
12 It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
13
14 Args:
15 root (string): Root directory where images are downloaded to.
16 annFile (string): Path to json annotation file.
17 transform (callable, optional): A function/transform that takes in a PIL image
18 and returns a transformed version. E.g, ``transforms.PILToTensor``
19 target_transform (callable, optional): A function/transform that takes in the
20 target and transforms it.
21 transforms (callable, optional): A function/transform that takes input sample and its target as entry
22 and returns a transformed version.
23 """
24
25 def __init__(
26 self,
27 root: str,
28 annFile: str,
29 transform: Optional[Callable] = None,
30 target_transform: Optional[Callable] = None,
31 transforms: Optional[Callable] = None,
32 ) -> None:
33 super().__init__(root, transforms, transform, target_transform)
34 from pycocotools.coco import COCO
35
36 self.coco = COCO(annFile)
37 self.ids = list(sorted(self.coco.imgs.keys()))
38
39 def _load_image(self, id: int) -> Image.Image:
40 path = self.coco.loadImgs(id)[0]["file_name"]
41 return Image.open(os.path.join(self.root, path)).convert("RGB")
42
43 def _load_target(self, id: int) -> List[Any]:
44 return self.coco.loadAnns(self.coco.getAnnIds(id))
45
46 def __getitem__(self, index: int) -> Tuple[Any, Any]:
47 id = self.ids[index]
48 image = self._load_image(id)
49 target = self._load_target(id)
50
51 if self.transforms is not None:
52 image, target = self.transforms(image, target)
53
54 return image, target
55
56 def __len__(self) -> int:
57 return len(self.ids)
58
59
60 class CocoCaptions(CocoDetection):
61 """`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
62
63 It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
64
65 Args:
66 root (string): Root directory where images are downloaded to.
67 annFile (string): Path to json annotation file.
68 transform (callable, optional): A function/transform that takes in a PIL image
69 and returns a transformed version. E.g, ``transforms.PILToTensor``
70 target_transform (callable, optional): A function/transform that takes in the
71 target and transforms it.
72 transforms (callable, optional): A function/transform that takes input sample and its target as entry
73 and returns a transformed version.
74
75 Example:
76
77 .. code:: python
78
79 import torchvision.datasets as dset
80 import torchvision.transforms as transforms
81 cap = dset.CocoCaptions(root = 'dir where images are',
82 annFile = 'json annotation file',
83 transform=transforms.PILToTensor())
84
85 print('Number of samples: ', len(cap))
86 img, target = cap[3] # load 4th sample
87
88 print("Image Size: ", img.size())
89 print(target)
90
91 Output: ::
92
93 Number of samples: 82783
94 Image Size: (3L, 427L, 640L)
95 [u'A plane emitting smoke stream flying over a mountain.',
96 u'A plane darts across a bright blue sky behind a mountain covered in snow',
97 u'A plane leaves a contrail above the snowy mountain top.',
98 u'A mountain that has a plane flying overheard in the distance.',
99 u'A mountain view with a plume of smoke in the background']
100
101 """
102
103 def _load_target(self, id: int) -> List[str]:
104 return [ann["caption"] for ann in super()._load_target(id)]
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/coco.py b/torchvision/datasets/coco.py
--- a/torchvision/datasets/coco.py
+++ b/torchvision/datasets/coco.py
@@ -44,6 +44,10 @@
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
+
+ if not isinstance(index, int):
+ raise ValueError(f"Index must be of type integer, got {type(index)} instead.")
+
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
| {"golden_diff": "diff --git a/torchvision/datasets/coco.py b/torchvision/datasets/coco.py\n--- a/torchvision/datasets/coco.py\n+++ b/torchvision/datasets/coco.py\n@@ -44,6 +44,10 @@\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n \n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n+\n+ if not isinstance(index, int):\n+ raise ValueError(f\"Index must be of type integer, got {type(index)} instead.\")\n+\n id = self.ids[index]\n image = self._load_image(id)\n target = self._load_target(id)\n", "issue": "\u200d\u200d\u200d\u200d`CocoDetection` accepts `slice` as argument to its `__getitem__` and returns wrong result\nHi, although `CocoDetection` declares its `__getitem__` argument to be of type `int`, it actually accepts `slice` as well. However it returns a wrong result. As this might be confusing for some users, I suggest to add a type check at the beginning of the `__getitem__`.\r\nhttps://github.com/pytorch/vision/blob/cab01fc1b7c7f0620ab95c6064f2a3fb583e9bee/torchvision/datasets/coco.py#L46-L49\r\n\r\n```python\r\nds[:2]\r\n```\r\n```\r\n(tensor([[[0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n [0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n [0.3373, 0.3373, 0.3373, ..., 0.2863, 0.2863, 0.2863],\r\n ...,\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235],\r\n [0.4196, 0.4196, 0.4196, ..., 0.4137, 0.3980, 0.3235]],\r\n \r\n [[0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n [0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n [0.1608, 0.1608, 0.1608, ..., 0.1216, 0.1216, 0.1216],\r\n ...,\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353],\r\n [0.5686, 0.5686, 0.5686, ..., 0.5333, 0.5137, 0.4353]],\r\n \r\n [[0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n [0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n [0.1412, 0.1412, 0.1412, ..., 0.1608, 0.1608, 0.1608],\r\n ...,\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020],\r\n [0.0902, 0.0902, 0.0902, ..., 0.1333, 0.1451, 0.1020]]]),\r\n {'category_id': tensor([2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3,\r\n 1]),\r\n 'bbox': tensor([[ 390, 240, 58, 92],\r\n [ 391, 319, 66, 99],\r\n [ 663, 331, 54, 106],\r\n [ 796, 324, 50, 102],\r\n [1245, 276, 35, 117],\r\n [ 599, 265, 58, 91],\r\n [ 755, 228, 42, 91],\r\n [ 788, 261, 61, 97],\r\n [ 250, 374, 73, 105],\r\n [ 224, 449, 29, 28],\r\n [ 12, 85, 13, 32],\r\n [ 88, 70, 13, 27],\r\n [ 102, 99, 14, 33],\r\n [ 100, 132, 14, 32],\r\n [ 151, 160, 13, 34],\r\n [ 177, 115, 18, 32],\r\n [ 214, 73, 12, 28],\r\n [ 267, 58, 13, 23],\r\n [ 358, 71, 14, 33],\r\n [ 356, 160, 17, 36],\r\n [ 231, 132, 14, 33],\r\n [ 234, 88, 16, 29],\r\n [ 248, 90, 13, 25],\r\n [ 60, 109, 13, 32],\r\n [ 243, 75, 7, 5]]),\r\n 'iscrowd': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\r\n 0])})\r\n```\r\nThe image in the returned tuple is that of the `ds[0]` and the target is the combined annotations of `ds[0]` and `ds[1]` which is wrong. 
This is due to the fact that `ds.coco` could work with a list of inputs in its `getAnnIds()`.\n\ncc @pmeier\n", "before_files": [{"content": "import os.path\nfrom typing import Any, Callable, List, Optional, Tuple\n\nfrom PIL import Image\n\nfrom .vision import VisionDataset\n\n\nclass CocoDetection(VisionDataset):\n \"\"\"`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n annFile: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n transforms: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transforms, transform, target_transform)\n from pycocotools.coco import COCO\n\n self.coco = COCO(annFile)\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def _load_image(self, id: int) -> Image.Image:\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(os.path.join(self.root, path)).convert(\"RGB\")\n\n def _load_target(self, id: int) -> List[Any]:\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n id = self.ids[index]\n image = self._load_image(id)\n target = self._load_target(id)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.ids)\n\n\nclass CocoCaptions(CocoDetection):\n \"\"\"`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n\n Example:\n\n .. 
code:: python\n\n import torchvision.datasets as dset\n import torchvision.transforms as transforms\n cap = dset.CocoCaptions(root = 'dir where images are',\n annFile = 'json annotation file',\n transform=transforms.PILToTensor())\n\n print('Number of samples: ', len(cap))\n img, target = cap[3] # load 4th sample\n\n print(\"Image Size: \", img.size())\n print(target)\n\n Output: ::\n\n Number of samples: 82783\n Image Size: (3L, 427L, 640L)\n [u'A plane emitting smoke stream flying over a mountain.',\n u'A plane darts across a bright blue sky behind a mountain covered in snow',\n u'A plane leaves a contrail above the snowy mountain top.',\n u'A mountain that has a plane flying overheard in the distance.',\n u'A mountain view with a plume of smoke in the background']\n\n \"\"\"\n\n def _load_target(self, id: int) -> List[str]:\n return [ann[\"caption\"] for ann in super()._load_target(id)]\n", "path": "torchvision/datasets/coco.py"}], "after_files": [{"content": "import os.path\nfrom typing import Any, Callable, List, Optional, Tuple\n\nfrom PIL import Image\n\nfrom .vision import VisionDataset\n\n\nclass CocoDetection(VisionDataset):\n \"\"\"`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n annFile: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n transforms: Optional[Callable] = None,\n ) -> None:\n super().__init__(root, transforms, transform, target_transform)\n from pycocotools.coco import COCO\n\n self.coco = COCO(annFile)\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def _load_image(self, id: int) -> Image.Image:\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(os.path.join(self.root, path)).convert(\"RGB\")\n\n def _load_target(self, id: int) -> List[Any]:\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n\n if not isinstance(index, int):\n raise ValueError(f\"Index must be of type integer, got {type(index)} instead.\")\n\n id = self.ids[index]\n image = self._load_image(id)\n target = self._load_target(id)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.ids)\n\n\nclass CocoCaptions(CocoDetection):\n \"\"\"`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.\n\n It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.\n\n Args:\n root (string): Root directory where images are downloaded to.\n annFile (string): Path to json annotation file.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. 
E.g, ``transforms.PILToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n\n Example:\n\n .. code:: python\n\n import torchvision.datasets as dset\n import torchvision.transforms as transforms\n cap = dset.CocoCaptions(root = 'dir where images are',\n annFile = 'json annotation file',\n transform=transforms.PILToTensor())\n\n print('Number of samples: ', len(cap))\n img, target = cap[3] # load 4th sample\n\n print(\"Image Size: \", img.size())\n print(target)\n\n Output: ::\n\n Number of samples: 82783\n Image Size: (3L, 427L, 640L)\n [u'A plane emitting smoke stream flying over a mountain.',\n u'A plane darts across a bright blue sky behind a mountain covered in snow',\n u'A plane leaves a contrail above the snowy mountain top.',\n u'A mountain that has a plane flying overheard in the distance.',\n u'A mountain view with a plume of smoke in the background']\n\n \"\"\"\n\n def _load_target(self, id: int) -> List[str]:\n return [ann[\"caption\"] for ann in super()._load_target(id)]\n", "path": "torchvision/datasets/coco.py"}]} | 3,308 | 147 |
gh_patches_debug_43254 | rasdani/github-patches | git_diff | streamlink__streamlink-4759 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.atresplayer: Live streams are not working.
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
As of today, Atresplayer live streams are not working.
### Debug log
```text
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 4.3.0
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.3.3
[cli][debug] Arguments:
[cli][debug] url=https://www.atresplayer.com/directos/antena3/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --hls-live-edge=1
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/
error: Unable to validate response text: ValidationError(NoneOrAllSchema):
ValidationError(type):
Type of <generator object search_dict at 0x000002C64BA79930> should be list, but is generator
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/atresplayer.py`
Content:
```
1 """
2 $description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.
3 $url atresplayer.com
4 $type live
5 $region Spain
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.dash import DASHStream
14 from streamlink.stream.hls import HLSStream
15 from streamlink.utils.data import search_dict
16 from streamlink.utils.url import update_scheme
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(re.compile(
22 r"https?://(?:www\.)?atresplayer\.com/"
23 ))
24 class AtresPlayer(Plugin):
25 def _get_streams(self):
26 self.url = update_scheme("https://", self.url)
27
28 api_url = self.session.http.get(self.url, schema=validate.Schema(
29 re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL),
30 validate.none_or_all(
31 validate.get(1),
32 validate.parse_json(),
33 validate.transform(search_dict, key="href"),
34 [validate.url()],
35 validate.get(0),
36 ),
37 ))
38 if not api_url:
39 return
40 log.debug(f"API URL: {api_url}")
41
42 player_api_url = self.session.http.get(api_url, schema=validate.Schema(
43 validate.parse_json(),
44 validate.transform(search_dict, key="urlVideo"),
45 ))
46
47 stream_schema = validate.Schema(
48 validate.parse_json(),
49 {
50 "sources": [
51 validate.all(
52 {
53 "src": validate.url(),
54 validate.optional("type"): str,
55 },
56 ),
57 ],
58 },
59 validate.get("sources"),
60 )
61
62 for api_url in player_api_url:
63 log.debug(f"Player API URL: {api_url}")
64 for source in self.session.http.get(api_url, schema=stream_schema):
65 log.debug(f"Stream source: {source['src']} ({source.get('type', 'n/a')})")
66
67 if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
68 streams = HLSStream.parse_variant_playlist(self.session, source["src"])
69 if not streams:
70 yield "live", HLSStream(self.session, source["src"])
71 else:
72 yield from streams.items()
73 elif source["type"] == "application/dash+xml":
74 yield from DASHStream.parse_manifest(self.session, source["src"]).items()
75
76
77 __plugin__ = AtresPlayer
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py
--- a/src/streamlink/plugins/atresplayer.py
+++ b/src/streamlink/plugins/atresplayer.py
@@ -7,12 +7,12 @@
import logging
import re
+from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.dash import DASHStream
from streamlink.stream.hls import HLSStream
-from streamlink.utils.data import search_dict
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@@ -24,15 +24,15 @@
class AtresPlayer(Plugin):
def _get_streams(self):
self.url = update_scheme("https://", self.url)
+ path = urlparse(self.url).path
api_url = self.session.http.get(self.url, schema=validate.Schema(
re.compile(r"""window.__PRELOADED_STATE__\s*=\s*({.*?});""", re.DOTALL),
validate.none_or_all(
validate.get(1),
validate.parse_json(),
- validate.transform(search_dict, key="href"),
- [validate.url()],
- validate.get(0),
+ {"links": {path: {"href": validate.url()}}},
+ validate.get(("links", path, "href")),
),
))
if not api_url:
@@ -41,37 +41,46 @@
player_api_url = self.session.http.get(api_url, schema=validate.Schema(
validate.parse_json(),
- validate.transform(search_dict, key="urlVideo"),
+ {"urlVideo": validate.url()},
+ validate.get("urlVideo"),
))
- stream_schema = validate.Schema(
+ log.debug(f"Player API URL: {player_api_url}")
+ sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(
validate.parse_json(),
- {
- "sources": [
- validate.all(
- {
- "src": validate.url(),
- validate.optional("type"): str,
- },
- ),
- ],
- },
- validate.get("sources"),
- )
+ validate.any(
+ {
+ "error": str,
+ "error_description": str,
+ },
+ {
+ "sources": [
+ validate.all(
+ {
+ "src": validate.url(),
+ validate.optional("type"): str,
+ },
+ validate.union_get("type", "src"),
+ ),
+ ],
+ },
+ ),
+ ))
+ if "error" in sources:
+ log.error(f"Player API error: {sources['error']} - {sources['error_description']}")
+ return
- for api_url in player_api_url:
- log.debug(f"Player API URL: {api_url}")
- for source in self.session.http.get(api_url, schema=stream_schema):
- log.debug(f"Stream source: {source['src']} ({source.get('type', 'n/a')})")
+ for streamtype, streamsrc in sources.get("sources"):
+ log.debug(f"Stream source: {streamsrc} ({streamtype or 'n/a'})")
- if "type" not in source or source["type"] == "application/vnd.apple.mpegurl":
- streams = HLSStream.parse_variant_playlist(self.session, source["src"])
- if not streams:
- yield "live", HLSStream(self.session, source["src"])
- else:
- yield from streams.items()
- elif source["type"] == "application/dash+xml":
- yield from DASHStream.parse_manifest(self.session, source["src"]).items()
+ if streamtype == "application/vnd.apple.mpegurl":
+ streams = HLSStream.parse_variant_playlist(self.session, streamsrc)
+ if not streams:
+ yield "live", HLSStream(self.session, streamsrc)
+ else:
+ yield from streams.items()
+ elif streamtype == "application/dash+xml":
+ yield from DASHStream.parse_manifest(self.session, streamsrc).items()
__plugin__ = AtresPlayer
| {"golden_diff": "diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py\n--- a/src/streamlink/plugins/atresplayer.py\n+++ b/src/streamlink/plugins/atresplayer.py\n@@ -7,12 +7,12 @@\n \n import logging\n import re\n+from urllib.parse import urlparse\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.dash import DASHStream\n from streamlink.stream.hls import HLSStream\n-from streamlink.utils.data import search_dict\n from streamlink.utils.url import update_scheme\n \n log = logging.getLogger(__name__)\n@@ -24,15 +24,15 @@\n class AtresPlayer(Plugin):\n def _get_streams(self):\n self.url = update_scheme(\"https://\", self.url)\n+ path = urlparse(self.url).path\n \n api_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"window.__PRELOADED_STATE__\\s*=\\s*({.*?});\"\"\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n- validate.transform(search_dict, key=\"href\"),\n- [validate.url()],\n- validate.get(0),\n+ {\"links\": {path: {\"href\": validate.url()}}},\n+ validate.get((\"links\", path, \"href\")),\n ),\n ))\n if not api_url:\n@@ -41,37 +41,46 @@\n \n player_api_url = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n- validate.transform(search_dict, key=\"urlVideo\"),\n+ {\"urlVideo\": validate.url()},\n+ validate.get(\"urlVideo\"),\n ))\n \n- stream_schema = validate.Schema(\n+ log.debug(f\"Player API URL: {player_api_url}\")\n+ sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(\n validate.parse_json(),\n- {\n- \"sources\": [\n- validate.all(\n- {\n- \"src\": validate.url(),\n- validate.optional(\"type\"): str,\n- },\n- ),\n- ],\n- },\n- validate.get(\"sources\"),\n- )\n+ validate.any(\n+ {\n+ \"error\": str,\n+ \"error_description\": str,\n+ },\n+ {\n+ \"sources\": [\n+ validate.all(\n+ {\n+ \"src\": validate.url(),\n+ validate.optional(\"type\"): str,\n+ },\n+ validate.union_get(\"type\", \"src\"),\n+ ),\n+ ],\n+ },\n+ ),\n+ ))\n+ if \"error\" in sources:\n+ log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n+ return\n \n- for api_url in player_api_url:\n- log.debug(f\"Player API URL: {api_url}\")\n- for source in self.session.http.get(api_url, schema=stream_schema):\n- log.debug(f\"Stream source: {source['src']} ({source.get('type', 'n/a')})\")\n+ for streamtype, streamsrc in sources.get(\"sources\"):\n+ log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n \n- if \"type\" not in source or source[\"type\"] == \"application/vnd.apple.mpegurl\":\n- streams = HLSStream.parse_variant_playlist(self.session, source[\"src\"])\n- if not streams:\n- yield \"live\", HLSStream(self.session, source[\"src\"])\n- else:\n- yield from streams.items()\n- elif source[\"type\"] == \"application/dash+xml\":\n- yield from DASHStream.parse_manifest(self.session, source[\"src\"]).items()\n+ if streamtype == \"application/vnd.apple.mpegurl\":\n+ streams = HLSStream.parse_variant_playlist(self.session, streamsrc)\n+ if not streams:\n+ yield \"live\", HLSStream(self.session, streamsrc)\n+ else:\n+ yield from streams.items()\n+ elif streamtype == \"application/dash+xml\":\n+ yield from DASHStream.parse_manifest(self.session, streamsrc).items()\n \n \n __plugin__ = AtresPlayer\n", "issue": "plugins.atresplayer: Live streams is not working.\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a 
different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest stable release\r\n\r\n### Description\r\n\r\nAs for today, Atresplayer live streams is not working.\r\n\r\n### Debug log\r\n\r\n```text\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 4.3.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.3.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.atresplayer.com/directos/antena3/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --hls-live-edge=1\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/\r\nerror: Unable to validate response text: ValidationError(NoneOrAllSchema):\r\n ValidationError(type):\r\n Type of <generator object search_dict at 0x000002C64BA79930> should be list, but is generator\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.\n$url atresplayer.com\n$type live\n$region Spain\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.data import search_dict\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?atresplayer\\.com/\"\n))\nclass AtresPlayer(Plugin):\n def _get_streams(self):\n self.url = update_scheme(\"https://\", self.url)\n\n api_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"window.__PRELOADED_STATE__\\s*=\\s*({.*?});\"\"\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n validate.transform(search_dict, key=\"href\"),\n [validate.url()],\n validate.get(0),\n ),\n ))\n if not api_url:\n return\n log.debug(f\"API URL: {api_url}\")\n\n player_api_url = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n validate.transform(search_dict, key=\"urlVideo\"),\n ))\n\n stream_schema = validate.Schema(\n validate.parse_json(),\n {\n \"sources\": [\n validate.all(\n {\n \"src\": validate.url(),\n validate.optional(\"type\"): str,\n },\n ),\n ],\n },\n validate.get(\"sources\"),\n )\n\n for api_url in player_api_url:\n log.debug(f\"Player API URL: {api_url}\")\n for source in self.session.http.get(api_url, schema=stream_schema):\n log.debug(f\"Stream source: {source['src']} ({source.get('type', 'n/a')})\")\n\n if \"type\" not in source or source[\"type\"] == \"application/vnd.apple.mpegurl\":\n streams = HLSStream.parse_variant_playlist(self.session, source[\"src\"])\n if not streams:\n yield \"live\", HLSStream(self.session, 
source[\"src\"])\n else:\n yield from streams.items()\n elif source[\"type\"] == \"application/dash+xml\":\n yield from DASHStream.parse_manifest(self.session, source[\"src\"]).items()\n\n\n__plugin__ = AtresPlayer\n", "path": "src/streamlink/plugins/atresplayer.py"}], "after_files": [{"content": "\"\"\"\n$description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.\n$url atresplayer.com\n$type live\n$region Spain\n\"\"\"\n\nimport logging\nimport re\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?atresplayer\\.com/\"\n))\nclass AtresPlayer(Plugin):\n def _get_streams(self):\n self.url = update_scheme(\"https://\", self.url)\n path = urlparse(self.url).path\n\n api_url = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\"\"window.__PRELOADED_STATE__\\s*=\\s*({.*?});\"\"\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\"links\": {path: {\"href\": validate.url()}}},\n validate.get((\"links\", path, \"href\")),\n ),\n ))\n if not api_url:\n return\n log.debug(f\"API URL: {api_url}\")\n\n player_api_url = self.session.http.get(api_url, schema=validate.Schema(\n validate.parse_json(),\n {\"urlVideo\": validate.url()},\n validate.get(\"urlVideo\"),\n ))\n\n log.debug(f\"Player API URL: {player_api_url}\")\n sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n {\n \"error\": str,\n \"error_description\": str,\n },\n {\n \"sources\": [\n validate.all(\n {\n \"src\": validate.url(),\n validate.optional(\"type\"): str,\n },\n validate.union_get(\"type\", \"src\"),\n ),\n ],\n },\n ),\n ))\n if \"error\" in sources:\n log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n return\n\n for streamtype, streamsrc in sources.get(\"sources\"):\n log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n\n if streamtype == \"application/vnd.apple.mpegurl\":\n streams = HLSStream.parse_variant_playlist(self.session, streamsrc)\n if not streams:\n yield \"live\", HLSStream(self.session, streamsrc)\n else:\n yield from streams.items()\n elif streamtype == \"application/dash+xml\":\n yield from DASHStream.parse_manifest(self.session, streamsrc).items()\n\n\n__plugin__ = AtresPlayer\n", "path": "src/streamlink/plugins/atresplayer.py"}]} | 1,463 | 920 |
gh_patches_debug_81 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-60 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix warning about deprecation of TEMPLATE_DEBUG
When starting aimmo locally the following message is displayed:
> WARNINGS:
> ?: (1_8.W001) The standalone TEMPLATE_\* settings were deprecated in Django 1.8 and the TEMPLATES dictionary takes precedence. You must put the values of the following settings into your default TEMPLATES dict: TEMPLATE_DEBUG.
The value in question is in `aimmo/example_project/example_project/settings.py`:
`TEMPLATE_DEBUG = DEBUG`
The TEMPLATES dictionary _maybe_ the one here `aimmo/players/autoconfig.py` (?):
```
'TEMPLATES': [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
]
}
}
],
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `example_project/example_project/settings.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 '''Django settings for example_project project.'''
38 import os
39
40 DEBUG = True
41 TEMPLATE_DEBUG = DEBUG
42
43 DATABASES = {
44 'default': {
45 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
46 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.
47 }
48 }
49
50 USE_I18N = True
51 USE_L10N = True
52
53 TIME_ZONE = 'Europe/London'
54 LANGUAGE_CODE = 'en-gb'
55 STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
56 STATIC_URL = '/static/'
57 SECRET_KEY = 'not-a-secret'
58
59 ROOT_URLCONF = 'django_autoconfig.autourlconf'
60
61 WSGI_APPLICATION = 'example_project.wsgi.application'
62
63 INSTALLED_APPS = (
64 'django.contrib.admin',
65 'players',
66 )
67
68 LOGGING = {
69 'version': 1,
70 'disable_existing_loggers': False,
71 'filters': {
72 'require_debug_false': {
73 '()': 'django.utils.log.RequireDebugFalse'
74 }
75 },
76 'handlers': {
77 'console': {
78 'level': 'DEBUG',
79 'class': 'logging.StreamHandler'
80 },
81 },
82 'loggers': {
83 'views': {
84 'handlers': ['console'],
85 'level': 'DEBUG'
86 },
87 }
88 }
89
90 try:
91 from example_project.local_settings import * # pylint: disable=E0611
92 except ImportError:
93 pass
94
95 from django_autoconfig import autoconfig
96 autoconfig.configure_settings(globals())
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py
--- a/example_project/example_project/settings.py
+++ b/example_project/example_project/settings.py
@@ -38,7 +38,6 @@
import os
DEBUG = True
-TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
| {"golden_diff": "diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py\n--- a/example_project/example_project/settings.py\n+++ b/example_project/example_project/settings.py\n@@ -38,7 +38,6 @@\n import os\n \n DEBUG = True\n-TEMPLATE_DEBUG = DEBUG\n \n DATABASES = {\n 'default': {\n", "issue": "Fix warning about deprecation of TEMPLATE_DEBUG\nWhen starting aimmo locally the following message is displayed:\n\n> WARNINGS:\n> ?: (1_8.W001) The standalone TEMPLATE_\\* settings were deprecated in Django 1.8 and the TEMPLATES dictionary takes precedence. You must put the values of the following settings into your default TEMPLATES dict: TEMPLATE_DEBUG.\n\nThe value in question is in `aimmo/example_project/example_project/settings.py`:\n`TEMPLATE_DEBUG = DEBUG`\n\nThe TEMPLATES dictionary _maybe_ the one here `aimmo/players/autoconfig.py` (?):\n\n```\n'TEMPLATES': [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ]\n }\n }\n ],\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Django settings for example_project project.'''\nimport os\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n'''Django settings for example_project project.'''\nimport os\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)),'db.sqlite3'),# Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'players',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}]} | 1,424 | 70 |
gh_patches_debug_42629 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-3869 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Permissions to delete Email-Rules/Accounts: disabled for a user, but user can still delete them
### Description
First the issue description:
A user without "delete"-permissions for email-rules can still delete them also when the rule was created by another user.
Second a question:
Is it possible for a user to fully manage his own email-rules/accounts without seeing the rules/accounts from other users?
### Steps to reproduce
1. Create a user (e.g. test-user) with mail-rule/mail-account add/edit/read permissions (no delete permission).
2. Create a mail-rule with other user (e.g. admin user)
3. Login with the "test-user"
4. Go to settings -> email
5. Delete the mail-rule created by admin-user
Also the mail-account can be deleted without delete-permissions.
### Webserver logs
```bash
No logs are visible
```
### Browser logs
_No response_
### Paperless-ngx version
1.16.5
### Host OS
Debian x86_64
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless_mail/admin.py`
Content:
```
1 from django import forms
2 from django.contrib import admin
3 from django.utils.translation import gettext_lazy as _
4
5 from paperless_mail.models import MailAccount
6 from paperless_mail.models import MailRule
7 from paperless_mail.models import ProcessedMail
8
9
10 class MailAccountAdminForm(forms.ModelForm):
11
12 """Metadata classes used by Django admin to display the form."""
13
14 class Meta:
15
16 """Metadata class used by Django admin to display the form."""
17
18 model = MailAccount
19 widgets = {
20 "password": forms.PasswordInput(),
21 }
22 fields = [
23 "name",
24 "imap_server",
25 "username",
26 "imap_security",
27 "username",
28 "password",
29 "is_token",
30 "character_set",
31 ]
32
33
34 class MailAccountAdmin(admin.ModelAdmin):
35 list_display = ("name", "imap_server", "username")
36
37 fieldsets = [
38 (None, {"fields": ["name", "imap_server", "imap_port"]}),
39 (
40 _("Authentication"),
41 {"fields": ["imap_security", "username", "password", "is_token"]},
42 ),
43 (_("Advanced settings"), {"fields": ["character_set"]}),
44 ]
45 form = MailAccountAdminForm
46
47
48 class MailRuleAdmin(admin.ModelAdmin):
49 radio_fields = {
50 "attachment_type": admin.VERTICAL,
51 "action": admin.VERTICAL,
52 "assign_title_from": admin.VERTICAL,
53 "assign_correspondent_from": admin.VERTICAL,
54 }
55
56 fieldsets = (
57 (None, {"fields": ("name", "order", "account", "folder")}),
58 (
59 _("Filter"),
60 {
61 "description": _(
62 "Paperless will only process mails that match ALL of the "
63 "filters given below.",
64 ),
65 "fields": (
66 "filter_from",
67 "filter_to",
68 "filter_subject",
69 "filter_body",
70 "filter_attachment_filename",
71 "maximum_age",
72 "consumption_scope",
73 "attachment_type",
74 ),
75 },
76 ),
77 (
78 _("Actions"),
79 {
80 "description": _(
81 "The action applied to the mail. This action is only "
82 "performed when the mail body or attachments were "
83 "consumed from the mail.",
84 ),
85 "fields": ("action", "action_parameter"),
86 },
87 ),
88 (
89 _("Metadata"),
90 {
91 "description": _(
92 "Assign metadata to documents consumed from this rule "
93 "automatically. If you do not assign tags, types or "
94 "correspondents here, paperless will still process all "
95 "matching rules that you have defined.",
96 ),
97 "fields": (
98 "assign_title_from",
99 "assign_tags",
100 "assign_document_type",
101 "assign_correspondent_from",
102 "assign_correspondent",
103 ),
104 },
105 ),
106 )
107
108 list_filter = ("account",)
109
110 list_display = ("order", "name", "account", "folder", "action")
111
112 list_editable = ("order",)
113
114 list_display_links = ("name",)
115
116 sortable_by = []
117
118 ordering = ["order"]
119
120
121 class ProcessedMailAdmin(admin.ModelAdmin):
122 class Meta:
123 model = ProcessedMail
124 fields = "__all__"
125
126 list_display = ("subject", "status", "processed", "received", "rule")
127
128 ordering = ["-processed"]
129
130 readonly_fields = [
131 "owner",
132 "rule",
133 "folder",
134 "uid",
135 "subject",
136 "received",
137 "processed",
138 "status",
139 "error",
140 ]
141
142 list_display_links = ["subject"]
143
144 list_filter = ("status", "rule")
145
146
147 admin.site.register(MailAccount, MailAccountAdmin)
148 admin.site.register(MailRule, MailRuleAdmin)
149 admin.site.register(ProcessedMail, ProcessedMailAdmin)
150
```
Path: `src/paperless_mail/serialisers.py`
Content:
```
1 from rest_framework import serializers
2
3 from documents.serialisers import CorrespondentField
4 from documents.serialisers import DocumentTypeField
5 from documents.serialisers import OwnedObjectSerializer
6 from documents.serialisers import TagsField
7 from paperless_mail.models import MailAccount
8 from paperless_mail.models import MailRule
9
10
11 class ObfuscatedPasswordField(serializers.Field):
12 """
13 Sends *** string instead of password in the clear
14 """
15
16 def to_representation(self, value):
17 return "*" * len(value)
18
19 def to_internal_value(self, data):
20 return data
21
22
23 class MailAccountSerializer(OwnedObjectSerializer):
24 password = ObfuscatedPasswordField()
25
26 class Meta:
27 model = MailAccount
28 depth = 1
29 fields = [
30 "id",
31 "name",
32 "imap_server",
33 "imap_port",
34 "imap_security",
35 "username",
36 "password",
37 "character_set",
38 "is_token",
39 ]
40
41 def update(self, instance, validated_data):
42 if (
43 "password" in validated_data
44 and len(validated_data.get("password").replace("*", "")) == 0
45 ):
46 validated_data.pop("password")
47 super().update(instance, validated_data)
48 return instance
49
50
51 class AccountField(serializers.PrimaryKeyRelatedField):
52 def get_queryset(self):
53 return MailAccount.objects.all().order_by("-id")
54
55
56 class MailRuleSerializer(OwnedObjectSerializer):
57 account = AccountField(required=True)
58 action_parameter = serializers.CharField(
59 allow_null=True,
60 required=False,
61 default="",
62 )
63 assign_correspondent = CorrespondentField(allow_null=True, required=False)
64 assign_tags = TagsField(many=True, allow_null=True, required=False)
65 assign_document_type = DocumentTypeField(allow_null=True, required=False)
66 order = serializers.IntegerField(required=False)
67
68 class Meta:
69 model = MailRule
70 depth = 1
71 fields = [
72 "id",
73 "name",
74 "account",
75 "folder",
76 "filter_from",
77 "filter_to",
78 "filter_subject",
79 "filter_body",
80 "filter_attachment_filename",
81 "maximum_age",
82 "action",
83 "action_parameter",
84 "assign_title_from",
85 "assign_tags",
86 "assign_correspondent_from",
87 "assign_correspondent",
88 "assign_document_type",
89 "order",
90 "attachment_type",
91 "consumption_scope",
92 ]
93
94 def update(self, instance, validated_data):
95 super().update(instance, validated_data)
96 return instance
97
98 def create(self, validated_data):
99 if "assign_tags" in validated_data:
100 assign_tags = validated_data.pop("assign_tags")
101 mail_rule = super().create(validated_data)
102 if assign_tags:
103 mail_rule.assign_tags.set(assign_tags)
104 return mail_rule
105
106 def validate(self, attrs):
107 if (
108 attrs["action"] == MailRule.MailAction.TAG
109 or attrs["action"] == MailRule.MailAction.MOVE
110 ) and attrs["action_parameter"] is None:
111 raise serializers.ValidationError("An action parameter is required.")
112
113 return attrs
114
```
Path: `src/paperless_mail/views.py`
Content:
```
1 import datetime
2 import logging
3
4 from django.http import HttpResponseBadRequest
5 from rest_framework.generics import GenericAPIView
6 from rest_framework.permissions import IsAuthenticated
7 from rest_framework.response import Response
8 from rest_framework.viewsets import ModelViewSet
9
10 from documents.views import PassUserMixin
11 from paperless.views import StandardPagination
12 from paperless_mail.mail import MailError
13 from paperless_mail.mail import get_mailbox
14 from paperless_mail.mail import mailbox_login
15 from paperless_mail.models import MailAccount
16 from paperless_mail.models import MailRule
17 from paperless_mail.serialisers import MailAccountSerializer
18 from paperless_mail.serialisers import MailRuleSerializer
19
20
21 class MailAccountViewSet(ModelViewSet, PassUserMixin):
22 model = MailAccount
23
24 queryset = MailAccount.objects.all().order_by("pk")
25 serializer_class = MailAccountSerializer
26 pagination_class = StandardPagination
27 permission_classes = (IsAuthenticated,)
28
29
30 class MailRuleViewSet(ModelViewSet, PassUserMixin):
31 model = MailRule
32
33 queryset = MailRule.objects.all().order_by("order")
34 serializer_class = MailRuleSerializer
35 pagination_class = StandardPagination
36 permission_classes = (IsAuthenticated,)
37
38
39 class MailAccountTestView(GenericAPIView):
40 permission_classes = (IsAuthenticated,)
41 serializer_class = MailAccountSerializer
42
43 def post(self, request, *args, **kwargs):
44 logger = logging.getLogger("paperless_mail")
45 request.data["name"] = datetime.datetime.now().isoformat()
46 serializer = self.get_serializer(data=request.data)
47 serializer.is_valid(raise_exception=True)
48
49 # account exists, use the password from there instead of ***
50 if (
51 len(serializer.validated_data.get("password").replace("*", "")) == 0
52 and request.data["id"] is not None
53 ):
54 serializer.validated_data["password"] = MailAccount.objects.get(
55 pk=request.data["id"],
56 ).password
57
58 account = MailAccount(**serializer.validated_data)
59
60 with get_mailbox(
61 account.imap_server,
62 account.imap_port,
63 account.imap_security,
64 ) as M:
65 try:
66 mailbox_login(M, account)
67 return Response({"success": True})
68 except MailError:
69 logger.error(
70 f"Mail account {account} test failed",
71 )
72 return HttpResponseBadRequest("Unable to connect to server")
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/paperless_mail/admin.py b/src/paperless_mail/admin.py
--- a/src/paperless_mail/admin.py
+++ b/src/paperless_mail/admin.py
@@ -1,6 +1,7 @@
from django import forms
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
+from guardian.admin import GuardedModelAdmin
from paperless_mail.models import MailAccount
from paperless_mail.models import MailRule
@@ -31,7 +32,7 @@
]
-class MailAccountAdmin(admin.ModelAdmin):
+class MailAccountAdmin(GuardedModelAdmin):
list_display = ("name", "imap_server", "username")
fieldsets = [
@@ -45,7 +46,7 @@
form = MailAccountAdminForm
-class MailRuleAdmin(admin.ModelAdmin):
+class MailRuleAdmin(GuardedModelAdmin):
radio_fields = {
"attachment_type": admin.VERTICAL,
"action": admin.VERTICAL,
diff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py
--- a/src/paperless_mail/serialisers.py
+++ b/src/paperless_mail/serialisers.py
@@ -25,7 +25,6 @@
class Meta:
model = MailAccount
- depth = 1
fields = [
"id",
"name",
@@ -36,6 +35,10 @@
"password",
"character_set",
"is_token",
+ "owner",
+ "user_can_change",
+ "permissions",
+ "set_permissions",
]
def update(self, instance, validated_data):
@@ -67,7 +70,6 @@
class Meta:
model = MailRule
- depth = 1
fields = [
"id",
"name",
@@ -89,6 +91,10 @@
"order",
"attachment_type",
"consumption_scope",
+ "owner",
+ "user_can_change",
+ "permissions",
+ "set_permissions",
]
def update(self, instance, validated_data):
diff --git a/src/paperless_mail/views.py b/src/paperless_mail/views.py
--- a/src/paperless_mail/views.py
+++ b/src/paperless_mail/views.py
@@ -7,6 +7,8 @@
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
+from documents.filters import ObjectOwnedOrGrantedPermissionsFilter
+from documents.permissions import PaperlessObjectPermissions
from documents.views import PassUserMixin
from paperless.views import StandardPagination
from paperless_mail.mail import MailError
@@ -24,7 +26,8 @@
queryset = MailAccount.objects.all().order_by("pk")
serializer_class = MailAccountSerializer
pagination_class = StandardPagination
- permission_classes = (IsAuthenticated,)
+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)
+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)
class MailRuleViewSet(ModelViewSet, PassUserMixin):
@@ -33,7 +36,8 @@
queryset = MailRule.objects.all().order_by("order")
serializer_class = MailRuleSerializer
pagination_class = StandardPagination
- permission_classes = (IsAuthenticated,)
+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)
+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)
class MailAccountTestView(GenericAPIView):
| {"golden_diff": "diff --git a/src/paperless_mail/admin.py b/src/paperless_mail/admin.py\n--- a/src/paperless_mail/admin.py\n+++ b/src/paperless_mail/admin.py\n@@ -1,6 +1,7 @@\n from django import forms\n from django.contrib import admin\n from django.utils.translation import gettext_lazy as _\n+from guardian.admin import GuardedModelAdmin\n \n from paperless_mail.models import MailAccount\n from paperless_mail.models import MailRule\n@@ -31,7 +32,7 @@\n ]\n \n \n-class MailAccountAdmin(admin.ModelAdmin):\n+class MailAccountAdmin(GuardedModelAdmin):\n list_display = (\"name\", \"imap_server\", \"username\")\n \n fieldsets = [\n@@ -45,7 +46,7 @@\n form = MailAccountAdminForm\n \n \n-class MailRuleAdmin(admin.ModelAdmin):\n+class MailRuleAdmin(GuardedModelAdmin):\n radio_fields = {\n \"attachment_type\": admin.VERTICAL,\n \"action\": admin.VERTICAL,\ndiff --git a/src/paperless_mail/serialisers.py b/src/paperless_mail/serialisers.py\n--- a/src/paperless_mail/serialisers.py\n+++ b/src/paperless_mail/serialisers.py\n@@ -25,7 +25,6 @@\n \n class Meta:\n model = MailAccount\n- depth = 1\n fields = [\n \"id\",\n \"name\",\n@@ -36,6 +35,10 @@\n \"password\",\n \"character_set\",\n \"is_token\",\n+ \"owner\",\n+ \"user_can_change\",\n+ \"permissions\",\n+ \"set_permissions\",\n ]\n \n def update(self, instance, validated_data):\n@@ -67,7 +70,6 @@\n \n class Meta:\n model = MailRule\n- depth = 1\n fields = [\n \"id\",\n \"name\",\n@@ -89,6 +91,10 @@\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n+ \"owner\",\n+ \"user_can_change\",\n+ \"permissions\",\n+ \"set_permissions\",\n ]\n \n def update(self, instance, validated_data):\ndiff --git a/src/paperless_mail/views.py b/src/paperless_mail/views.py\n--- a/src/paperless_mail/views.py\n+++ b/src/paperless_mail/views.py\n@@ -7,6 +7,8 @@\n from rest_framework.response import Response\n from rest_framework.viewsets import ModelViewSet\n \n+from documents.filters import ObjectOwnedOrGrantedPermissionsFilter\n+from documents.permissions import PaperlessObjectPermissions\n from documents.views import PassUserMixin\n from paperless.views import StandardPagination\n from paperless_mail.mail import MailError\n@@ -24,7 +26,8 @@\n queryset = MailAccount.objects.all().order_by(\"pk\")\n serializer_class = MailAccountSerializer\n pagination_class = StandardPagination\n- permission_classes = (IsAuthenticated,)\n+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n \n \n class MailRuleViewSet(ModelViewSet, PassUserMixin):\n@@ -33,7 +36,8 @@\n queryset = MailRule.objects.all().order_by(\"order\")\n serializer_class = MailRuleSerializer\n pagination_class = StandardPagination\n- permission_classes = (IsAuthenticated,)\n+ permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n+ filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n \n \n class MailAccountTestView(GenericAPIView):\n", "issue": "[BUG] Permissions to delete Email-Rules/Accounts: disabled for a user, but user can still delete them\n### Description\r\n\r\nFirst the issue description: \r\nA user without \"delete\"-permissions for email-rules can still delete them also when the rule was created by another user.\r\n\r\nSecond a question:\r\nIs it possible for a user to fully manage his own email-rules/accounts without seeing the rules/accounts from other users?\r\n\r\n### Steps to reproduce\r\n\r\n1. Create a user (e.g. 
test-user) with mail-rule/mail-account add/edit/read permissions (no delete permission).\r\n2. Create a mail-rule with other user (e.g. admin user)\r\n3. Login with the \"test-user\"\r\n4. Go to settings -> email \r\n5. Delete the mail-rule created by admin-user\r\n\r\nAlso the mail-account can be deleted without delete-permissions.\r\n\r\n### Webserver logs\r\n\r\n```bash\r\nNo logs are visible\r\n```\r\n\r\n\r\n### Browser logs\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.16.5\r\n\r\n### Host OS\r\n\r\nDebian x86_64\r\n\r\n### Installation method\r\n\r\nDocker - official image\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "from django import forms\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n\nclass MailAccountAdminForm(forms.ModelForm):\n\n \"\"\"Metadata classes used by Django admin to display the form.\"\"\"\n\n class Meta:\n\n \"\"\"Metadata class used by Django admin to display the form.\"\"\"\n\n model = MailAccount\n widgets = {\n \"password\": forms.PasswordInput(),\n }\n fields = [\n \"name\",\n \"imap_server\",\n \"username\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"is_token\",\n \"character_set\",\n ]\n\n\nclass MailAccountAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"imap_server\", \"username\")\n\n fieldsets = [\n (None, {\"fields\": [\"name\", \"imap_server\", \"imap_port\"]}),\n (\n _(\"Authentication\"),\n {\"fields\": [\"imap_security\", \"username\", \"password\", \"is_token\"]},\n ),\n (_(\"Advanced settings\"), {\"fields\": [\"character_set\"]}),\n ]\n form = MailAccountAdminForm\n\n\nclass MailRuleAdmin(admin.ModelAdmin):\n radio_fields = {\n \"attachment_type\": admin.VERTICAL,\n \"action\": admin.VERTICAL,\n \"assign_title_from\": admin.VERTICAL,\n \"assign_correspondent_from\": admin.VERTICAL,\n }\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"order\", \"account\", \"folder\")}),\n (\n _(\"Filter\"),\n {\n \"description\": _(\n \"Paperless will only process mails that match ALL of the \"\n \"filters given below.\",\n ),\n \"fields\": (\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"consumption_scope\",\n \"attachment_type\",\n ),\n },\n ),\n (\n _(\"Actions\"),\n {\n \"description\": _(\n \"The action applied to the mail. This action is only \"\n \"performed when the mail body or attachments were \"\n \"consumed from the mail.\",\n ),\n \"fields\": (\"action\", \"action_parameter\"),\n },\n ),\n (\n _(\"Metadata\"),\n {\n \"description\": _(\n \"Assign metadata to documents consumed from this rule \"\n \"automatically. 
If you do not assign tags, types or \"\n \"correspondents here, paperless will still process all \"\n \"matching rules that you have defined.\",\n ),\n \"fields\": (\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_document_type\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n ),\n },\n ),\n )\n\n list_filter = (\"account\",)\n\n list_display = (\"order\", \"name\", \"account\", \"folder\", \"action\")\n\n list_editable = (\"order\",)\n\n list_display_links = (\"name\",)\n\n sortable_by = []\n\n ordering = [\"order\"]\n\n\nclass ProcessedMailAdmin(admin.ModelAdmin):\n class Meta:\n model = ProcessedMail\n fields = \"__all__\"\n\n list_display = (\"subject\", \"status\", \"processed\", \"received\", \"rule\")\n\n ordering = [\"-processed\"]\n\n readonly_fields = [\n \"owner\",\n \"rule\",\n \"folder\",\n \"uid\",\n \"subject\",\n \"received\",\n \"processed\",\n \"status\",\n \"error\",\n ]\n\n list_display_links = [\"subject\"]\n\n list_filter = (\"status\", \"rule\")\n\n\nadmin.site.register(MailAccount, MailAccountAdmin)\nadmin.site.register(MailRule, MailRuleAdmin)\nadmin.site.register(ProcessedMail, ProcessedMailAdmin)\n", "path": "src/paperless_mail/admin.py"}, {"content": "from rest_framework import serializers\n\nfrom documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import OwnedObjectSerializer\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(OwnedObjectSerializer):\n password = ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n \"is_token\",\n ]\n\n def update(self, instance, validated_data):\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n ):\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(OwnedObjectSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n depth = 1\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, 
validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = super().create(validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n", "path": "src/paperless_mail/serialisers.py"}, {"content": "import datetime\nimport logging\n\nfrom django.http import HttpResponseBadRequest\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom documents.views import PassUserMixin\nfrom paperless.views import StandardPagination\nfrom paperless_mail.mail import MailError\nfrom paperless_mail.mail import get_mailbox\nfrom paperless_mail.mail import mailbox_login\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.serialisers import MailAccountSerializer\nfrom paperless_mail.serialisers import MailRuleSerializer\n\n\nclass MailAccountViewSet(ModelViewSet, PassUserMixin):\n model = MailAccount\n\n queryset = MailAccount.objects.all().order_by(\"pk\")\n serializer_class = MailAccountSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated,)\n\n\nclass MailRuleViewSet(ModelViewSet, PassUserMixin):\n model = MailRule\n\n queryset = MailRule.objects.all().order_by(\"order\")\n serializer_class = MailRuleSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated,)\n\n\nclass MailAccountTestView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = MailAccountSerializer\n\n def post(self, request, *args, **kwargs):\n logger = logging.getLogger(\"paperless_mail\")\n request.data[\"name\"] = datetime.datetime.now().isoformat()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n # account exists, use the password from there instead of ***\n if (\n len(serializer.validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n and request.data[\"id\"] is not None\n ):\n serializer.validated_data[\"password\"] = MailAccount.objects.get(\n pk=request.data[\"id\"],\n ).password\n\n account = MailAccount(**serializer.validated_data)\n\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n try:\n mailbox_login(M, account)\n return Response({\"success\": True})\n except MailError:\n logger.error(\n f\"Mail account {account} test failed\",\n )\n return HttpResponseBadRequest(\"Unable to connect to server\")\n", "path": "src/paperless_mail/views.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\nfrom guardian.admin import GuardedModelAdmin\n\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.models import ProcessedMail\n\n\nclass MailAccountAdminForm(forms.ModelForm):\n\n \"\"\"Metadata classes used by Django admin to display the form.\"\"\"\n\n class Meta:\n\n \"\"\"Metadata class used by Django admin to display the form.\"\"\"\n\n model = MailAccount\n 
widgets = {\n \"password\": forms.PasswordInput(),\n }\n fields = [\n \"name\",\n \"imap_server\",\n \"username\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"is_token\",\n \"character_set\",\n ]\n\n\nclass MailAccountAdmin(GuardedModelAdmin):\n list_display = (\"name\", \"imap_server\", \"username\")\n\n fieldsets = [\n (None, {\"fields\": [\"name\", \"imap_server\", \"imap_port\"]}),\n (\n _(\"Authentication\"),\n {\"fields\": [\"imap_security\", \"username\", \"password\", \"is_token\"]},\n ),\n (_(\"Advanced settings\"), {\"fields\": [\"character_set\"]}),\n ]\n form = MailAccountAdminForm\n\n\nclass MailRuleAdmin(GuardedModelAdmin):\n radio_fields = {\n \"attachment_type\": admin.VERTICAL,\n \"action\": admin.VERTICAL,\n \"assign_title_from\": admin.VERTICAL,\n \"assign_correspondent_from\": admin.VERTICAL,\n }\n\n fieldsets = (\n (None, {\"fields\": (\"name\", \"order\", \"account\", \"folder\")}),\n (\n _(\"Filter\"),\n {\n \"description\": _(\n \"Paperless will only process mails that match ALL of the \"\n \"filters given below.\",\n ),\n \"fields\": (\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"consumption_scope\",\n \"attachment_type\",\n ),\n },\n ),\n (\n _(\"Actions\"),\n {\n \"description\": _(\n \"The action applied to the mail. This action is only \"\n \"performed when the mail body or attachments were \"\n \"consumed from the mail.\",\n ),\n \"fields\": (\"action\", \"action_parameter\"),\n },\n ),\n (\n _(\"Metadata\"),\n {\n \"description\": _(\n \"Assign metadata to documents consumed from this rule \"\n \"automatically. If you do not assign tags, types or \"\n \"correspondents here, paperless will still process all \"\n \"matching rules that you have defined.\",\n ),\n \"fields\": (\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_document_type\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n ),\n },\n ),\n )\n\n list_filter = (\"account\",)\n\n list_display = (\"order\", \"name\", \"account\", \"folder\", \"action\")\n\n list_editable = (\"order\",)\n\n list_display_links = (\"name\",)\n\n sortable_by = []\n\n ordering = [\"order\"]\n\n\nclass ProcessedMailAdmin(admin.ModelAdmin):\n class Meta:\n model = ProcessedMail\n fields = \"__all__\"\n\n list_display = (\"subject\", \"status\", \"processed\", \"received\", \"rule\")\n\n ordering = [\"-processed\"]\n\n readonly_fields = [\n \"owner\",\n \"rule\",\n \"folder\",\n \"uid\",\n \"subject\",\n \"received\",\n \"processed\",\n \"status\",\n \"error\",\n ]\n\n list_display_links = [\"subject\"]\n\n list_filter = (\"status\", \"rule\")\n\n\nadmin.site.register(MailAccount, MailAccountAdmin)\nadmin.site.register(MailRule, MailRuleAdmin)\nadmin.site.register(ProcessedMail, ProcessedMailAdmin)\n", "path": "src/paperless_mail/admin.py"}, {"content": "from rest_framework import serializers\n\nfrom documents.serialisers import CorrespondentField\nfrom documents.serialisers import DocumentTypeField\nfrom documents.serialisers import OwnedObjectSerializer\nfrom documents.serialisers import TagsField\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\n\nclass ObfuscatedPasswordField(serializers.Field):\n \"\"\"\n Sends *** string instead of password in the clear\n \"\"\"\n\n def to_representation(self, value):\n return \"*\" * len(value)\n\n def to_internal_value(self, data):\n return data\n\n\nclass MailAccountSerializer(OwnedObjectSerializer):\n password = 
ObfuscatedPasswordField()\n\n class Meta:\n model = MailAccount\n fields = [\n \"id\",\n \"name\",\n \"imap_server\",\n \"imap_port\",\n \"imap_security\",\n \"username\",\n \"password\",\n \"character_set\",\n \"is_token\",\n \"owner\",\n \"user_can_change\",\n \"permissions\",\n \"set_permissions\",\n ]\n\n def update(self, instance, validated_data):\n if (\n \"password\" in validated_data\n and len(validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n ):\n validated_data.pop(\"password\")\n super().update(instance, validated_data)\n return instance\n\n\nclass AccountField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n return MailAccount.objects.all().order_by(\"-id\")\n\n\nclass MailRuleSerializer(OwnedObjectSerializer):\n account = AccountField(required=True)\n action_parameter = serializers.CharField(\n allow_null=True,\n required=False,\n default=\"\",\n )\n assign_correspondent = CorrespondentField(allow_null=True, required=False)\n assign_tags = TagsField(many=True, allow_null=True, required=False)\n assign_document_type = DocumentTypeField(allow_null=True, required=False)\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = MailRule\n fields = [\n \"id\",\n \"name\",\n \"account\",\n \"folder\",\n \"filter_from\",\n \"filter_to\",\n \"filter_subject\",\n \"filter_body\",\n \"filter_attachment_filename\",\n \"maximum_age\",\n \"action\",\n \"action_parameter\",\n \"assign_title_from\",\n \"assign_tags\",\n \"assign_correspondent_from\",\n \"assign_correspondent\",\n \"assign_document_type\",\n \"order\",\n \"attachment_type\",\n \"consumption_scope\",\n \"owner\",\n \"user_can_change\",\n \"permissions\",\n \"set_permissions\",\n ]\n\n def update(self, instance, validated_data):\n super().update(instance, validated_data)\n return instance\n\n def create(self, validated_data):\n if \"assign_tags\" in validated_data:\n assign_tags = validated_data.pop(\"assign_tags\")\n mail_rule = super().create(validated_data)\n if assign_tags:\n mail_rule.assign_tags.set(assign_tags)\n return mail_rule\n\n def validate(self, attrs):\n if (\n attrs[\"action\"] == MailRule.MailAction.TAG\n or attrs[\"action\"] == MailRule.MailAction.MOVE\n ) and attrs[\"action_parameter\"] is None:\n raise serializers.ValidationError(\"An action parameter is required.\")\n\n return attrs\n", "path": "src/paperless_mail/serialisers.py"}, {"content": "import datetime\nimport logging\n\nfrom django.http import HttpResponseBadRequest\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom documents.filters import ObjectOwnedOrGrantedPermissionsFilter\nfrom documents.permissions import PaperlessObjectPermissions\nfrom documents.views import PassUserMixin\nfrom paperless.views import StandardPagination\nfrom paperless_mail.mail import MailError\nfrom paperless_mail.mail import get_mailbox\nfrom paperless_mail.mail import mailbox_login\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\nfrom paperless_mail.serialisers import MailAccountSerializer\nfrom paperless_mail.serialisers import MailRuleSerializer\n\n\nclass MailAccountViewSet(ModelViewSet, PassUserMixin):\n model = MailAccount\n\n queryset = MailAccount.objects.all().order_by(\"pk\")\n serializer_class = MailAccountSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, 
PaperlessObjectPermissions)\n filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n\n\nclass MailRuleViewSet(ModelViewSet, PassUserMixin):\n model = MailRule\n\n queryset = MailRule.objects.all().order_by(\"order\")\n serializer_class = MailRuleSerializer\n pagination_class = StandardPagination\n permission_classes = (IsAuthenticated, PaperlessObjectPermissions)\n filter_backends = (ObjectOwnedOrGrantedPermissionsFilter,)\n\n\nclass MailAccountTestView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = MailAccountSerializer\n\n def post(self, request, *args, **kwargs):\n logger = logging.getLogger(\"paperless_mail\")\n request.data[\"name\"] = datetime.datetime.now().isoformat()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n # account exists, use the password from there instead of ***\n if (\n len(serializer.validated_data.get(\"password\").replace(\"*\", \"\")) == 0\n and request.data[\"id\"] is not None\n ):\n serializer.validated_data[\"password\"] = MailAccount.objects.get(\n pk=request.data[\"id\"],\n ).password\n\n account = MailAccount(**serializer.validated_data)\n\n with get_mailbox(\n account.imap_server,\n account.imap_port,\n account.imap_security,\n ) as M:\n try:\n mailbox_login(M, account)\n return Response({\"success\": True})\n except MailError:\n logger.error(\n f\"Mail account {account} test failed\",\n )\n return HttpResponseBadRequest(\"Unable to connect to server\")\n", "path": "src/paperless_mail/views.py"}]} | 3,232 | 758 |
gh_patches_debug_23806 | rasdani/github-patches | git_diff | ipython__ipython-9861 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tools/github_stats.py still fails on two factor auth with app
See #9179. I think #9186 fixed it only for SMS codes, not for OTPs generated from a smartphone app.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/gh_api.py`
Content:
```
1 """Functions for Github API requests."""
2 from __future__ import print_function
3
4 try:
5 input = raw_input
6 except NameError:
7 pass
8
9 import os
10 import re
11 import sys
12
13 import requests
14 import getpass
15 import json
16
17 try:
18 import requests_cache
19 except ImportError:
20 print("cache not available, install `requests_cache` for caching.", file=sys.stderr)
21 else:
22 requests_cache.install_cache("gh_api", expire_after=3600)
23
24 # Keyring stores passwords by a 'username', but we're not storing a username and
25 # password
26 fake_username = 'ipython_tools'
27
28 class Obj(dict):
29 """Dictionary with attribute access to names."""
30 def __getattr__(self, name):
31 try:
32 return self[name]
33 except KeyError:
34 raise AttributeError(name)
35
36 def __setattr__(self, name, val):
37 self[name] = val
38
39 token = None
40 def get_auth_token():
41 global token
42
43 if token is not None:
44 return token
45
46 import keyring
47 token = keyring.get_password('github', fake_username)
48 if token is not None:
49 return token
50
51 print("Please enter your github username and password. These are not "
52 "stored, only used to get an oAuth token. You can revoke this at "
53 "any time on Github.\n"
54 "Username: ", file=sys.stderr, end='')
55 user = input('')
56 pw = getpass.getpass("Password: ", stream=sys.stderr)
57
58 auth_request = {
59 "scopes": [
60 "public_repo",
61 "gist"
62 ],
63 "note": "IPython tools",
64 "note_url": "https://github.com/ipython/ipython/tree/master/tools",
65 }
66 response = requests.post('https://api.github.com/authorizations',
67 auth=(user, pw), data=json.dumps(auth_request))
68 if response.status_code == 401 and \
69 response.headers.get('X-GitHub-OTP') == 'required; sms':
70 print("Your login API resquest a SMS one time password", file=sys.stderr)
71 sms_pw = getpass.getpass("SMS password: ", stream=sys.stderr)
72 response = requests.post('https://api.github.com/authorizations',
73 auth=(user, pw),
74 data=json.dumps(auth_request),
75 headers={'X-GitHub-OTP':sms_pw})
76 response.raise_for_status()
77 token = json.loads(response.text)['token']
78 keyring.set_password('github', fake_username, token)
79 return token
80
81 def make_auth_header():
82 return {'Authorization': 'token ' + get_auth_token()}
83
84 def post_issue_comment(project, num, body):
85 url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
86 payload = json.dumps({'body': body})
87 requests.post(url, data=payload, headers=make_auth_header())
88
89 def post_gist(content, description='', filename='file', auth=False):
90 """Post some text to a Gist, and return the URL."""
91 post_data = json.dumps({
92 "description": description,
93 "public": True,
94 "files": {
95 filename: {
96 "content": content
97 }
98 }
99 }).encode('utf-8')
100
101 headers = make_auth_header() if auth else {}
102 response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
103 response.raise_for_status()
104 response_data = json.loads(response.text)
105 return response_data['html_url']
106
107 def get_pull_request(project, num, auth=False):
108 """get pull request info by number
109 """
110 url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
111 if auth:
112 header = make_auth_header()
113 else:
114 header = None
115 print("fetching %s" % url, file=sys.stderr)
116 response = requests.get(url, headers=header)
117 response.raise_for_status()
118 return json.loads(response.text, object_hook=Obj)
119
120 def get_pull_request_files(project, num, auth=False):
121 """get list of files in a pull request"""
122 url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
123 if auth:
124 header = make_auth_header()
125 else:
126 header = None
127 return get_paged_request(url, headers=header)
128
129 element_pat = re.compile(r'<(.+?)>')
130 rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
131
132 def get_paged_request(url, headers=None, **params):
133 """get a full list, handling APIv3's paging"""
134 results = []
135 params.setdefault("per_page", 100)
136 while True:
137 if '?' in url:
138 params = None
139 print("fetching %s" % url, file=sys.stderr)
140 else:
141 print("fetching %s with %s" % (url, params), file=sys.stderr)
142 response = requests.get(url, headers=headers, params=params)
143 response.raise_for_status()
144 results.extend(response.json())
145 if 'next' in response.links:
146 url = response.links['next']['url']
147 else:
148 break
149 return results
150
151 def get_pulls_list(project, auth=False, **params):
152 """get pull request list"""
153 params.setdefault("state", "closed")
154 url = "https://api.github.com/repos/{project}/pulls".format(project=project)
155 if auth:
156 headers = make_auth_header()
157 else:
158 headers = None
159 pages = get_paged_request(url, headers=headers, **params)
160 return pages
161
162 def get_issues_list(project, auth=False, **params):
163 """get issues list"""
164 params.setdefault("state", "closed")
165 url = "https://api.github.com/repos/{project}/issues".format(project=project)
166 if auth:
167 headers = make_auth_header()
168 else:
169 headers = None
170 pages = get_paged_request(url, headers=headers, **params)
171 return pages
172
173 def get_milestones(project, auth=False, **params):
174 params.setdefault('state', 'all')
175 url = "https://api.github.com/repos/{project}/milestones".format(project=project)
176 if auth:
177 headers = make_auth_header()
178 else:
179 headers = None
180 milestones = get_paged_request(url, headers=headers, **params)
181 return milestones
182
183 def get_milestone_id(project, milestone, auth=False, **params):
184 milestones = get_milestones(project, auth=auth, **params)
185 for mstone in milestones:
186 if mstone['title'] == milestone:
187 return mstone['number']
188 else:
189 raise ValueError("milestone %s not found" % milestone)
190
191 def is_pull_request(issue):
192 """Return True if the given issue is a pull request."""
193 return bool(issue.get('pull_request', {}).get('html_url', None))
194
195 def get_authors(pr):
196 print("getting authors for #%i" % pr['number'], file=sys.stderr)
197 h = make_auth_header()
198 r = requests.get(pr['commits_url'], headers=h)
199 r.raise_for_status()
200 commits = r.json()
201 authors = []
202 for commit in commits:
203 author = commit['commit']['author']
204 authors.append("%s <%s>" % (author['name'], author['email']))
205 return authors
206
207 # encode_multipart_formdata is from urllib3.filepost
208 # The only change is to iter_fields, to enforce S3's required key ordering
209
210 def iter_fields(fields):
211 fields = fields.copy()
212 for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
213 'Policy', 'Signature', 'Content-Type', 'file'):
214 yield (key, fields.pop(key))
215 for (k,v) in fields.items():
216 yield k,v
217
218 def encode_multipart_formdata(fields, boundary=None):
219 """
220 Encode a dictionary of ``fields`` using the multipart/form-data mime format.
221
222 :param fields:
223 Dictionary of fields or list of (key, value) field tuples. The key is
224 treated as the field name, and the value as the body of the form-data
225 bytes. If the value is a tuple of two elements, then the first element
226 is treated as the filename of the form-data section.
227
228 Field names and filenames must be unicode.
229
230 :param boundary:
231 If not specified, then a random boundary will be generated using
232 :func:`mimetools.choose_boundary`.
233 """
234 # copy requests imports in here:
235 from io import BytesIO
236 from requests.packages.urllib3.filepost import (
237 choose_boundary, six, writer, b, get_content_type
238 )
239 body = BytesIO()
240 if boundary is None:
241 boundary = choose_boundary()
242
243 for fieldname, value in iter_fields(fields):
244 body.write(b('--%s\r\n' % (boundary)))
245
246 if isinstance(value, tuple):
247 filename, data = value
248 writer(body).write('Content-Disposition: form-data; name="%s"; '
249 'filename="%s"\r\n' % (fieldname, filename))
250 body.write(b('Content-Type: %s\r\n\r\n' %
251 (get_content_type(filename))))
252 else:
253 data = value
254 writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
255 % (fieldname))
256 body.write(b'Content-Type: text/plain\r\n\r\n')
257
258 if isinstance(data, int):
259 data = str(data) # Backwards compatibility
260 if isinstance(data, six.text_type):
261 writer(body).write(data)
262 else:
263 body.write(data)
264
265 body.write(b'\r\n')
266
267 body.write(b('--%s--\r\n' % (boundary)))
268
269 content_type = b('multipart/form-data; boundary=%s' % boundary)
270
271 return body.getvalue(), content_type
272
273
274 def post_download(project, filename, name=None, description=""):
275 """Upload a file to the GitHub downloads area"""
276 if name is None:
277 name = os.path.basename(filename)
278 with open(filename, 'rb') as f:
279 filedata = f.read()
280
281 url = "https://api.github.com/repos/{project}/downloads".format(project=project)
282
283 payload = json.dumps(dict(name=name, size=len(filedata),
284 description=description))
285 response = requests.post(url, data=payload, headers=make_auth_header())
286 response.raise_for_status()
287 reply = json.loads(response.content)
288 s3_url = reply['s3_url']
289
290 fields = dict(
291 key=reply['path'],
292 acl=reply['acl'],
293 success_action_status=201,
294 Filename=reply['name'],
295 AWSAccessKeyId=reply['accesskeyid'],
296 Policy=reply['policy'],
297 Signature=reply['signature'],
298 file=(reply['name'], filedata),
299 )
300 fields['Content-Type'] = reply['mime_type']
301 data, content_type = encode_multipart_formdata(fields)
302 s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
303 return s3r
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/gh_api.py b/tools/gh_api.py
--- a/tools/gh_api.py
+++ b/tools/gh_api.py
@@ -66,13 +66,13 @@
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
if response.status_code == 401 and \
- response.headers.get('X-GitHub-OTP') == 'required; sms':
- print("Your login API resquest a SMS one time password", file=sys.stderr)
- sms_pw = getpass.getpass("SMS password: ", stream=sys.stderr)
+ 'required;' in response.headers.get('X-GitHub-OTP', ''):
+ print("Your login API requested a one time password", file=sys.stderr)
+ otp = getpass.getpass("One Time Password: ", stream=sys.stderr)
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw),
data=json.dumps(auth_request),
- headers={'X-GitHub-OTP':sms_pw})
+ headers={'X-GitHub-OTP':otp})
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
| {"golden_diff": "diff --git a/tools/gh_api.py b/tools/gh_api.py\n--- a/tools/gh_api.py\n+++ b/tools/gh_api.py\n@@ -66,13 +66,13 @@\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), data=json.dumps(auth_request))\n if response.status_code == 401 and \\\n- response.headers.get('X-GitHub-OTP') == 'required; sms':\n- print(\"Your login API resquest a SMS one time password\", file=sys.stderr)\n- sms_pw = getpass.getpass(\"SMS password: \", stream=sys.stderr)\n+ 'required;' in response.headers.get('X-GitHub-OTP', ''):\n+ print(\"Your login API requested a one time password\", file=sys.stderr)\n+ otp = getpass.getpass(\"One Time Password: \", stream=sys.stderr)\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), \n data=json.dumps(auth_request),\n- headers={'X-GitHub-OTP':sms_pw})\n+ headers={'X-GitHub-OTP':otp})\n response.raise_for_status()\n token = json.loads(response.text)['token']\n keyring.set_password('github', fake_username, token)\n", "issue": "tools/github_stats.py still fails on two factor auth with app\nSee #9179. I think #9186 fixed it only for SMS codes, not for OTPs generated from a smartphone app.\n\n", "before_files": [{"content": "\"\"\"Functions for Github API requests.\"\"\"\nfrom __future__ import print_function\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nimport os\nimport re\nimport sys\n\nimport requests\nimport getpass\nimport json\n\ntry:\n import requests_cache\nexcept ImportError:\n print(\"cache not available, install `requests_cache` for caching.\", file=sys.stderr)\nelse:\n requests_cache.install_cache(\"gh_api\", expire_after=3600)\n\n# Keyring stores passwords by a 'username', but we're not storing a username and\n# password\nfake_username = 'ipython_tools'\n\nclass Obj(dict):\n \"\"\"Dictionary with attribute access to names.\"\"\"\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, val):\n self[name] = val\n\ntoken = None\ndef get_auth_token():\n global token\n\n if token is not None:\n return token\n\n import keyring\n token = keyring.get_password('github', fake_username)\n if token is not None:\n return token\n\n print(\"Please enter your github username and password. These are not \"\n \"stored, only used to get an oAuth token. 
You can revoke this at \"\n \"any time on Github.\\n\"\n \"Username: \", file=sys.stderr, end='')\n user = input('')\n pw = getpass.getpass(\"Password: \", stream=sys.stderr)\n\n auth_request = {\n \"scopes\": [\n \"public_repo\",\n \"gist\"\n ],\n \"note\": \"IPython tools\",\n \"note_url\": \"https://github.com/ipython/ipython/tree/master/tools\",\n }\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), data=json.dumps(auth_request))\n if response.status_code == 401 and \\\n response.headers.get('X-GitHub-OTP') == 'required; sms':\n print(\"Your login API resquest a SMS one time password\", file=sys.stderr)\n sms_pw = getpass.getpass(\"SMS password: \", stream=sys.stderr)\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), \n data=json.dumps(auth_request),\n headers={'X-GitHub-OTP':sms_pw})\n response.raise_for_status()\n token = json.loads(response.text)['token']\n keyring.set_password('github', fake_username, token)\n return token\n\ndef make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n\ndef post_issue_comment(project, num, body):\n url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)\n payload = json.dumps({'body': body})\n requests.post(url, data=payload, headers=make_auth_header())\n\ndef post_gist(content, description='', filename='file', auth=False):\n \"\"\"Post some text to a Gist, and return the URL.\"\"\"\n post_data = json.dumps({\n \"description\": description,\n \"public\": True,\n \"files\": {\n filename: {\n \"content\": content\n }\n }\n }).encode('utf-8')\n\n headers = make_auth_header() if auth else {}\n response = requests.post(\"https://api.github.com/gists\", data=post_data, headers=headers)\n response.raise_for_status()\n response_data = json.loads(response.text)\n return response_data['html_url']\n\ndef get_pull_request(project, num, auth=False):\n \"\"\"get pull request info by number\n \"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n print(\"fetching %s\" % url, file=sys.stderr)\n response = requests.get(url, headers=header)\n response.raise_for_status()\n return json.loads(response.text, object_hook=Obj)\n\ndef get_pull_request_files(project, num, auth=False):\n \"\"\"get list of files in a pull request\"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}/files\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n return get_paged_request(url, headers=header)\n\nelement_pat = re.compile(r'<(.+?)>')\nrel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')\n\ndef get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' 
in url:\n params = None\n print(\"fetching %s\" % url, file=sys.stderr)\n else:\n print(\"fetching %s with %s\" % (url, params), file=sys.stderr)\n response = requests.get(url, headers=headers, params=params)\n response.raise_for_status()\n results.extend(response.json())\n if 'next' in response.links:\n url = response.links['next']['url']\n else:\n break\n return results\n\ndef get_pulls_list(project, auth=False, **params):\n \"\"\"get pull request list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/pulls\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_issues_list(project, auth=False, **params):\n \"\"\"get issues list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/issues\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_milestones(project, auth=False, **params):\n params.setdefault('state', 'all')\n url = \"https://api.github.com/repos/{project}/milestones\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n milestones = get_paged_request(url, headers=headers, **params)\n return milestones\n\ndef get_milestone_id(project, milestone, auth=False, **params):\n milestones = get_milestones(project, auth=auth, **params)\n for mstone in milestones:\n if mstone['title'] == milestone:\n return mstone['number']\n else:\n raise ValueError(\"milestone %s not found\" % milestone)\n\ndef is_pull_request(issue):\n \"\"\"Return True if the given issue is a pull request.\"\"\"\n return bool(issue.get('pull_request', {}).get('html_url', None))\n\ndef get_authors(pr):\n print(\"getting authors for #%i\" % pr['number'], file=sys.stderr)\n h = make_auth_header()\n r = requests.get(pr['commits_url'], headers=h)\n r.raise_for_status()\n commits = r.json()\n authors = []\n for commit in commits:\n author = commit['commit']['author']\n authors.append(\"%s <%s>\" % (author['name'], author['email']))\n return authors\n\n# encode_multipart_formdata is from urllib3.filepost\n# The only change is to iter_fields, to enforce S3's required key ordering\n\ndef iter_fields(fields):\n fields = fields.copy()\n for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',\n 'Policy', 'Signature', 'Content-Type', 'file'):\n yield (key, fields.pop(key))\n for (k,v) in fields.items():\n yield k,v\n\ndef encode_multipart_formdata(fields, boundary=None):\n \"\"\"\n Encode a dictionary of ``fields`` using the multipart/form-data mime format.\n\n :param fields:\n Dictionary of fields or list of (key, value) field tuples. The key is\n treated as the field name, and the value as the body of the form-data\n bytes. 
If the value is a tuple of two elements, then the first element\n is treated as the filename of the form-data section.\n\n Field names and filenames must be unicode.\n\n :param boundary:\n If not specified, then a random boundary will be generated using\n :func:`mimetools.choose_boundary`.\n \"\"\"\n # copy requests imports in here:\n from io import BytesIO\n from requests.packages.urllib3.filepost import (\n choose_boundary, six, writer, b, get_content_type\n )\n body = BytesIO()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in iter_fields(fields):\n body.write(b('--%s\\r\\n' % (boundary)))\n\n if isinstance(value, tuple):\n filename, data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write(b('Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename))))\n else:\n data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n if isinstance(data, six.text_type):\n writer(body).write(data)\n else:\n body.write(data)\n\n body.write(b'\\r\\n')\n\n body.write(b('--%s--\\r\\n' % (boundary)))\n\n content_type = b('multipart/form-data; boundary=%s' % boundary)\n\n return body.getvalue(), content_type\n\n\ndef post_download(project, filename, name=None, description=\"\"):\n \"\"\"Upload a file to the GitHub downloads area\"\"\"\n if name is None:\n name = os.path.basename(filename)\n with open(filename, 'rb') as f:\n filedata = f.read()\n\n url = \"https://api.github.com/repos/{project}/downloads\".format(project=project)\n\n payload = json.dumps(dict(name=name, size=len(filedata),\n description=description))\n response = requests.post(url, data=payload, headers=make_auth_header())\n response.raise_for_status()\n reply = json.loads(response.content)\n s3_url = reply['s3_url']\n\n fields = dict(\n key=reply['path'],\n acl=reply['acl'],\n success_action_status=201,\n Filename=reply['name'],\n AWSAccessKeyId=reply['accesskeyid'],\n Policy=reply['policy'],\n Signature=reply['signature'],\n file=(reply['name'], filedata),\n )\n fields['Content-Type'] = reply['mime_type']\n data, content_type = encode_multipart_formdata(fields)\n s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})\n return s3r\n", "path": "tools/gh_api.py"}], "after_files": [{"content": "\"\"\"Functions for Github API requests.\"\"\"\nfrom __future__ import print_function\n\ntry:\n input = raw_input\nexcept NameError:\n pass\n\nimport os\nimport re\nimport sys\n\nimport requests\nimport getpass\nimport json\n\ntry:\n import requests_cache\nexcept ImportError:\n print(\"cache not available, install `requests_cache` for caching.\", file=sys.stderr)\nelse:\n requests_cache.install_cache(\"gh_api\", expire_after=3600)\n\n# Keyring stores passwords by a 'username', but we're not storing a username and\n# password\nfake_username = 'ipython_tools'\n\nclass Obj(dict):\n \"\"\"Dictionary with attribute access to names.\"\"\"\n def __getattr__(self, name):\n try:\n return self[name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, val):\n self[name] = val\n\ntoken = None\ndef get_auth_token():\n global token\n\n if token is not None:\n return token\n\n import keyring\n token = keyring.get_password('github', fake_username)\n if token is not None:\n return token\n\n print(\"Please enter your github 
username and password. These are not \"\n \"stored, only used to get an oAuth token. You can revoke this at \"\n \"any time on Github.\\n\"\n \"Username: \", file=sys.stderr, end='')\n user = input('')\n pw = getpass.getpass(\"Password: \", stream=sys.stderr)\n\n auth_request = {\n \"scopes\": [\n \"public_repo\",\n \"gist\"\n ],\n \"note\": \"IPython tools\",\n \"note_url\": \"https://github.com/ipython/ipython/tree/master/tools\",\n }\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), data=json.dumps(auth_request))\n if response.status_code == 401 and \\\n 'required;' in response.headers.get('X-GitHub-OTP', ''):\n print(\"Your login API requested a one time password\", file=sys.stderr)\n otp = getpass.getpass(\"One Time Password: \", stream=sys.stderr)\n response = requests.post('https://api.github.com/authorizations',\n auth=(user, pw), \n data=json.dumps(auth_request),\n headers={'X-GitHub-OTP':otp})\n response.raise_for_status()\n token = json.loads(response.text)['token']\n keyring.set_password('github', fake_username, token)\n return token\n\ndef make_auth_header():\n return {'Authorization': 'token ' + get_auth_token()}\n\ndef post_issue_comment(project, num, body):\n url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)\n payload = json.dumps({'body': body})\n requests.post(url, data=payload, headers=make_auth_header())\n\ndef post_gist(content, description='', filename='file', auth=False):\n \"\"\"Post some text to a Gist, and return the URL.\"\"\"\n post_data = json.dumps({\n \"description\": description,\n \"public\": True,\n \"files\": {\n filename: {\n \"content\": content\n }\n }\n }).encode('utf-8')\n\n headers = make_auth_header() if auth else {}\n response = requests.post(\"https://api.github.com/gists\", data=post_data, headers=headers)\n response.raise_for_status()\n response_data = json.loads(response.text)\n return response_data['html_url']\n\ndef get_pull_request(project, num, auth=False):\n \"\"\"get pull request info by number\n \"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n print(\"fetching %s\" % url, file=sys.stderr)\n response = requests.get(url, headers=header)\n response.raise_for_status()\n return json.loads(response.text, object_hook=Obj)\n\ndef get_pull_request_files(project, num, auth=False):\n \"\"\"get list of files in a pull request\"\"\"\n url = \"https://api.github.com/repos/{project}/pulls/{num}/files\".format(project=project, num=num)\n if auth:\n header = make_auth_header()\n else:\n header = None\n return get_paged_request(url, headers=header)\n\nelement_pat = re.compile(r'<(.+?)>')\nrel_pat = re.compile(r'rel=[\\'\"](\\w+)[\\'\"]')\n\ndef get_paged_request(url, headers=None, **params):\n \"\"\"get a full list, handling APIv3's paging\"\"\"\n results = []\n params.setdefault(\"per_page\", 100)\n while True:\n if '?' 
in url:\n params = None\n print(\"fetching %s\" % url, file=sys.stderr)\n else:\n print(\"fetching %s with %s\" % (url, params), file=sys.stderr)\n response = requests.get(url, headers=headers, params=params)\n response.raise_for_status()\n results.extend(response.json())\n if 'next' in response.links:\n url = response.links['next']['url']\n else:\n break\n return results\n\ndef get_pulls_list(project, auth=False, **params):\n \"\"\"get pull request list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/pulls\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_issues_list(project, auth=False, **params):\n \"\"\"get issues list\"\"\"\n params.setdefault(\"state\", \"closed\")\n url = \"https://api.github.com/repos/{project}/issues\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n pages = get_paged_request(url, headers=headers, **params)\n return pages\n\ndef get_milestones(project, auth=False, **params):\n params.setdefault('state', 'all')\n url = \"https://api.github.com/repos/{project}/milestones\".format(project=project)\n if auth:\n headers = make_auth_header()\n else:\n headers = None\n milestones = get_paged_request(url, headers=headers, **params)\n return milestones\n\ndef get_milestone_id(project, milestone, auth=False, **params):\n milestones = get_milestones(project, auth=auth, **params)\n for mstone in milestones:\n if mstone['title'] == milestone:\n return mstone['number']\n else:\n raise ValueError(\"milestone %s not found\" % milestone)\n\ndef is_pull_request(issue):\n \"\"\"Return True if the given issue is a pull request.\"\"\"\n return bool(issue.get('pull_request', {}).get('html_url', None))\n\ndef get_authors(pr):\n print(\"getting authors for #%i\" % pr['number'], file=sys.stderr)\n h = make_auth_header()\n r = requests.get(pr['commits_url'], headers=h)\n r.raise_for_status()\n commits = r.json()\n authors = []\n for commit in commits:\n author = commit['commit']['author']\n authors.append(\"%s <%s>\" % (author['name'], author['email']))\n return authors\n\n# encode_multipart_formdata is from urllib3.filepost\n# The only change is to iter_fields, to enforce S3's required key ordering\n\ndef iter_fields(fields):\n fields = fields.copy()\n for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',\n 'Policy', 'Signature', 'Content-Type', 'file'):\n yield (key, fields.pop(key))\n for (k,v) in fields.items():\n yield k,v\n\ndef encode_multipart_formdata(fields, boundary=None):\n \"\"\"\n Encode a dictionary of ``fields`` using the multipart/form-data mime format.\n\n :param fields:\n Dictionary of fields or list of (key, value) field tuples. The key is\n treated as the field name, and the value as the body of the form-data\n bytes. 
If the value is a tuple of two elements, then the first element\n is treated as the filename of the form-data section.\n\n Field names and filenames must be unicode.\n\n :param boundary:\n If not specified, then a random boundary will be generated using\n :func:`mimetools.choose_boundary`.\n \"\"\"\n # copy requests imports in here:\n from io import BytesIO\n from requests.packages.urllib3.filepost import (\n choose_boundary, six, writer, b, get_content_type\n )\n body = BytesIO()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in iter_fields(fields):\n body.write(b('--%s\\r\\n' % (boundary)))\n\n if isinstance(value, tuple):\n filename, data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write(b('Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename))))\n else:\n data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n if isinstance(data, six.text_type):\n writer(body).write(data)\n else:\n body.write(data)\n\n body.write(b'\\r\\n')\n\n body.write(b('--%s--\\r\\n' % (boundary)))\n\n content_type = b('multipart/form-data; boundary=%s' % boundary)\n\n return body.getvalue(), content_type\n\n\ndef post_download(project, filename, name=None, description=\"\"):\n \"\"\"Upload a file to the GitHub downloads area\"\"\"\n if name is None:\n name = os.path.basename(filename)\n with open(filename, 'rb') as f:\n filedata = f.read()\n\n url = \"https://api.github.com/repos/{project}/downloads\".format(project=project)\n\n payload = json.dumps(dict(name=name, size=len(filedata),\n description=description))\n response = requests.post(url, data=payload, headers=make_auth_header())\n response.raise_for_status()\n reply = json.loads(response.content)\n s3_url = reply['s3_url']\n\n fields = dict(\n key=reply['path'],\n acl=reply['acl'],\n success_action_status=201,\n Filename=reply['name'],\n AWSAccessKeyId=reply['accesskeyid'],\n Policy=reply['policy'],\n Signature=reply['signature'],\n file=(reply['name'], filedata),\n )\n fields['Content-Type'] = reply['mime_type']\n data, content_type = encode_multipart_formdata(fields)\n s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})\n return s3r\n", "path": "tools/gh_api.py"}]} | 3,529 | 282 |
gh_patches_debug_38408 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5077 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py`
Content:
```
1 import logging
2 from typing import Dict, Optional, Type
3
4 from great_expectations.rule_based_profiler.data_assistant import DataAssistant
5 from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (
6 DataAssistantRunner,
7 )
8
9 logger = logging.getLogger(__name__)
10
11
12 class DataAssistantDispatcher:
13 """
14 DataAssistantDispatcher intercepts requests for "DataAssistant" classes by their registered names and manages their
15 associated "DataAssistantRunner" objects, which process invocations of calls to "DataAssistant" "run()" methods.
16 """
17
18 _registered_data_assistants: Dict[str, Type[DataAssistant]] = {}
19
20 def __init__(self, data_context: "BaseDataContext") -> None: # noqa: F821
21 """
22 Args:
23 data_context: BaseDataContext associated with DataAssistantDispatcher
24 """
25 self._data_context = data_context
26
27 self._data_assistant_runner_cache = {}
28
29 def __getattr__(self, name: str) -> DataAssistantRunner:
30 # Both, registered data_assistant_type and alias name are supported for invocation.
31
32 # _registered_data_assistants has both aliases and full names
33 data_assistant_cls: Optional[
34 Type[DataAssistant]
35 ] = DataAssistantDispatcher.get_data_assistant_impl(name=name)
36
37 # If "DataAssistant" is not registered, then raise "AttributeError", which is appropriate for "__getattr__()".
38 if data_assistant_cls is None:
39 raise AttributeError(
40 f'"{type(self).__name__}" object has no attribute "{name}".'
41 )
42
43 data_assistant_name: str = data_assistant_cls.data_assistant_type
44 data_assistant_runner: Optional[
45 DataAssistantRunner
46 ] = self._data_assistant_runner_cache.get(data_assistant_name)
47 if data_assistant_runner is None:
48 data_assistant_runner = DataAssistantRunner(
49 data_assistant_cls=data_assistant_cls,
50 data_context=self._data_context,
51 )
52 self._data_assistant_runner_cache[
53 data_assistant_name
54 ] = data_assistant_runner
55
56 return data_assistant_runner
57
58 @classmethod
59 def register_data_assistant(
60 cls,
61 data_assistant: Type[DataAssistant], # noqa: F821
62 ) -> None:
63 """
64 This method executes "run()" of effective "RuleBasedProfiler" and fills "DataAssistantResult" object with outputs.
65
66 Args:
67 data_assistant: "DataAssistant" class to be registered
68 """
69 data_assistant_type = data_assistant.data_assistant_type
70 cls._register(data_assistant_type, data_assistant)
71
72 alias: Optional[str] = data_assistant.__alias__
73 if alias is not None:
74 cls._register(alias, data_assistant)
75
76 @classmethod
77 def _register(cls, name: str, data_assistant: Type[DataAssistant]) -> None:
78 registered_data_assistants = cls._registered_data_assistants
79
80 if name in registered_data_assistants:
81 raise ValueError(f'Existing declarations of DataAssistant "{name}" found.')
82
83 logger.debug(
84 f'Registering the declaration of DataAssistant "{name}" took place.'
85 )
86 registered_data_assistants[name] = data_assistant
87
88 @classmethod
89 def get_data_assistant_impl(
90 cls,
91 name: Optional[str],
92 ) -> Optional[Type[DataAssistant]]: # noqa: F821
93 """
94 This method obtains (previously registered) "DataAssistant" class from DataAssistant Registry.
95
96 Note that it will clean the input string before checking against registered assistants.
97
98 Args:
99 data_assistant_type: String representing "snake case" version of "DataAssistant" class type
100
101 Returns:
102 Class inheriting "DataAssistant" if found; otherwise, None
103 """
104 if name is None:
105 return None
106 name = name.lower()
107 return cls._registered_data_assistants.get(name)
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
--- a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
+++ b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py
@@ -1,5 +1,5 @@
import logging
-from typing import Dict, Optional, Type
+from typing import Dict, Optional, Set, Type
from great_expectations.rule_based_profiler.data_assistant import DataAssistant
from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (
@@ -61,7 +61,7 @@
data_assistant: Type[DataAssistant], # noqa: F821
) -> None:
"""
- This method executes "run()" of effective "RuleBasedProfiler" and fills "DataAssistantResult" object with outputs.
+ This method registers "DataAssistant" subclass for future instantiation and execution of its "run()" method.
Args:
data_assistant: "DataAssistant" class to be registered
@@ -89,14 +89,14 @@
def get_data_assistant_impl(
cls,
name: Optional[str],
- ) -> Optional[Type[DataAssistant]]: # noqa: F821
+ ) -> Optional[Type[DataAssistant]]:
"""
This method obtains (previously registered) "DataAssistant" class from DataAssistant Registry.
Note that it will clean the input string before checking against registered assistants.
Args:
- data_assistant_type: String representing "snake case" version of "DataAssistant" class type
+ name: String representing "snake case" version of "DataAssistant" class type
Returns:
Class inheriting "DataAssistant" if found; otherwise, None
@@ -105,3 +105,23 @@
return None
name = name.lower()
return cls._registered_data_assistants.get(name)
+
+ def __dir__(self):
+ """
+ This custom magic method is used to enable tab completion on "DataAssistantDispatcher" objects.
+ """
+ data_assistant_dispatcher_attrs: Set[str] = set(super().__dir__())
+ data_assistant_registered_names: Set[
+ str
+ ] = get_registered_data_assistant_names()
+ combined_dir_attrs: Set[str] = (
+ data_assistant_dispatcher_attrs | data_assistant_registered_names
+ )
+ return list(combined_dir_attrs)
+
+
+def get_registered_data_assistant_names() -> Set[str]:
+ """
+ This method returns names (registered data_assistant_type and alias name) of registered "DataAssistant" classes.
+ """
+ return set(DataAssistantDispatcher._registered_data_assistants.keys())
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n--- a/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n+++ b/great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py\n@@ -1,5 +1,5 @@\n import logging\n-from typing import Dict, Optional, Type\n+from typing import Dict, Optional, Set, Type\n \n from great_expectations.rule_based_profiler.data_assistant import DataAssistant\n from great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (\n@@ -61,7 +61,7 @@\n data_assistant: Type[DataAssistant], # noqa: F821\n ) -> None:\n \"\"\"\n- This method executes \"run()\" of effective \"RuleBasedProfiler\" and fills \"DataAssistantResult\" object with outputs.\n+ This method registers \"DataAssistant\" subclass for future instantiation and execution of its \"run()\" method.\n \n Args:\n data_assistant: \"DataAssistant\" class to be registered\n@@ -89,14 +89,14 @@\n def get_data_assistant_impl(\n cls,\n name: Optional[str],\n- ) -> Optional[Type[DataAssistant]]: # noqa: F821\n+ ) -> Optional[Type[DataAssistant]]:\n \"\"\"\n This method obtains (previously registered) \"DataAssistant\" class from DataAssistant Registry.\n \n Note that it will clean the input string before checking against registered assistants.\n \n Args:\n- data_assistant_type: String representing \"snake case\" version of \"DataAssistant\" class type\n+ name: String representing \"snake case\" version of \"DataAssistant\" class type\n \n Returns:\n Class inheriting \"DataAssistant\" if found; otherwise, None\n@@ -105,3 +105,23 @@\n return None\n name = name.lower()\n return cls._registered_data_assistants.get(name)\n+\n+ def __dir__(self):\n+ \"\"\"\n+ This custom magic method is used to enable tab completion on \"DataAssistantDispatcher\" objects.\n+ \"\"\"\n+ data_assistant_dispatcher_attrs: Set[str] = set(super().__dir__())\n+ data_assistant_registered_names: Set[\n+ str\n+ ] = get_registered_data_assistant_names()\n+ combined_dir_attrs: Set[str] = (\n+ data_assistant_dispatcher_attrs | data_assistant_registered_names\n+ )\n+ return list(combined_dir_attrs)\n+\n+\n+def get_registered_data_assistant_names() -> Set[str]:\n+ \"\"\"\n+ This method returns names (registered data_assistant_type and alias name) of registered \"DataAssistant\" classes.\n+ \"\"\"\n+ return set(DataAssistantDispatcher._registered_data_assistants.keys())\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom typing import Dict, Optional, Type\n\nfrom great_expectations.rule_based_profiler.data_assistant import DataAssistant\nfrom great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (\n DataAssistantRunner,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataAssistantDispatcher:\n \"\"\"\n DataAssistantDispatcher intercepts requests for \"DataAssistant\" classes by their registered names and manages their\n associated \"DataAssistantRunner\" objects, which process invocations of calls to \"DataAssistant\" \"run()\" methods.\n \"\"\"\n\n _registered_data_assistants: Dict[str, Type[DataAssistant]] = {}\n\n def __init__(self, data_context: \"BaseDataContext\") -> None: # noqa: F821\n \"\"\"\n Args:\n data_context: BaseDataContext associated with 
DataAssistantDispatcher\n \"\"\"\n self._data_context = data_context\n\n self._data_assistant_runner_cache = {}\n\n def __getattr__(self, name: str) -> DataAssistantRunner:\n # Both, registered data_assistant_type and alias name are supported for invocation.\n\n # _registered_data_assistants has both aliases and full names\n data_assistant_cls: Optional[\n Type[DataAssistant]\n ] = DataAssistantDispatcher.get_data_assistant_impl(name=name)\n\n # If \"DataAssistant\" is not registered, then raise \"AttributeError\", which is appropriate for \"__getattr__()\".\n if data_assistant_cls is None:\n raise AttributeError(\n f'\"{type(self).__name__}\" object has no attribute \"{name}\".'\n )\n\n data_assistant_name: str = data_assistant_cls.data_assistant_type\n data_assistant_runner: Optional[\n DataAssistantRunner\n ] = self._data_assistant_runner_cache.get(data_assistant_name)\n if data_assistant_runner is None:\n data_assistant_runner = DataAssistantRunner(\n data_assistant_cls=data_assistant_cls,\n data_context=self._data_context,\n )\n self._data_assistant_runner_cache[\n data_assistant_name\n ] = data_assistant_runner\n\n return data_assistant_runner\n\n @classmethod\n def register_data_assistant(\n cls,\n data_assistant: Type[DataAssistant], # noqa: F821\n ) -> None:\n \"\"\"\n This method executes \"run()\" of effective \"RuleBasedProfiler\" and fills \"DataAssistantResult\" object with outputs.\n\n Args:\n data_assistant: \"DataAssistant\" class to be registered\n \"\"\"\n data_assistant_type = data_assistant.data_assistant_type\n cls._register(data_assistant_type, data_assistant)\n\n alias: Optional[str] = data_assistant.__alias__\n if alias is not None:\n cls._register(alias, data_assistant)\n\n @classmethod\n def _register(cls, name: str, data_assistant: Type[DataAssistant]) -> None:\n registered_data_assistants = cls._registered_data_assistants\n\n if name in registered_data_assistants:\n raise ValueError(f'Existing declarations of DataAssistant \"{name}\" found.')\n\n logger.debug(\n f'Registering the declaration of DataAssistant \"{name}\" took place.'\n )\n registered_data_assistants[name] = data_assistant\n\n @classmethod\n def get_data_assistant_impl(\n cls,\n name: Optional[str],\n ) -> Optional[Type[DataAssistant]]: # noqa: F821\n \"\"\"\n This method obtains (previously registered) \"DataAssistant\" class from DataAssistant Registry.\n\n Note that it will clean the input string before checking against registered assistants.\n\n Args:\n data_assistant_type: String representing \"snake case\" version of \"DataAssistant\" class type\n\n Returns:\n Class inheriting \"DataAssistant\" if found; otherwise, None\n \"\"\"\n if name is None:\n return None\n name = name.lower()\n return cls._registered_data_assistants.get(name)\n", "path": "great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py"}], "after_files": [{"content": "import logging\nfrom typing import Dict, Optional, Set, Type\n\nfrom great_expectations.rule_based_profiler.data_assistant import DataAssistant\nfrom great_expectations.rule_based_profiler.data_assistant.data_assistant_runner import (\n DataAssistantRunner,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataAssistantDispatcher:\n \"\"\"\n DataAssistantDispatcher intercepts requests for \"DataAssistant\" classes by their registered names and manages their\n associated \"DataAssistantRunner\" objects, which process invocations of calls to \"DataAssistant\" \"run()\" methods.\n \"\"\"\n\n _registered_data_assistants: Dict[str, 
Type[DataAssistant]] = {}\n\n def __init__(self, data_context: \"BaseDataContext\") -> None: # noqa: F821\n \"\"\"\n Args:\n data_context: BaseDataContext associated with DataAssistantDispatcher\n \"\"\"\n self._data_context = data_context\n\n self._data_assistant_runner_cache = {}\n\n def __getattr__(self, name: str) -> DataAssistantRunner:\n # Both, registered data_assistant_type and alias name are supported for invocation.\n\n # _registered_data_assistants has both aliases and full names\n data_assistant_cls: Optional[\n Type[DataAssistant]\n ] = DataAssistantDispatcher.get_data_assistant_impl(name=name)\n\n # If \"DataAssistant\" is not registered, then raise \"AttributeError\", which is appropriate for \"__getattr__()\".\n if data_assistant_cls is None:\n raise AttributeError(\n f'\"{type(self).__name__}\" object has no attribute \"{name}\".'\n )\n\n data_assistant_name: str = data_assistant_cls.data_assistant_type\n data_assistant_runner: Optional[\n DataAssistantRunner\n ] = self._data_assistant_runner_cache.get(data_assistant_name)\n if data_assistant_runner is None:\n data_assistant_runner = DataAssistantRunner(\n data_assistant_cls=data_assistant_cls,\n data_context=self._data_context,\n )\n self._data_assistant_runner_cache[\n data_assistant_name\n ] = data_assistant_runner\n\n return data_assistant_runner\n\n @classmethod\n def register_data_assistant(\n cls,\n data_assistant: Type[DataAssistant], # noqa: F821\n ) -> None:\n \"\"\"\n This method registers \"DataAssistant\" subclass for future instantiation and execution of its \"run()\" method.\n\n Args:\n data_assistant: \"DataAssistant\" class to be registered\n \"\"\"\n data_assistant_type = data_assistant.data_assistant_type\n cls._register(data_assistant_type, data_assistant)\n\n alias: Optional[str] = data_assistant.__alias__\n if alias is not None:\n cls._register(alias, data_assistant)\n\n @classmethod\n def _register(cls, name: str, data_assistant: Type[DataAssistant]) -> None:\n registered_data_assistants = cls._registered_data_assistants\n\n if name in registered_data_assistants:\n raise ValueError(f'Existing declarations of DataAssistant \"{name}\" found.')\n\n logger.debug(\n f'Registering the declaration of DataAssistant \"{name}\" took place.'\n )\n registered_data_assistants[name] = data_assistant\n\n @classmethod\n def get_data_assistant_impl(\n cls,\n name: Optional[str],\n ) -> Optional[Type[DataAssistant]]:\n \"\"\"\n This method obtains (previously registered) \"DataAssistant\" class from DataAssistant Registry.\n\n Note that it will clean the input string before checking against registered assistants.\n\n Args:\n name: String representing \"snake case\" version of \"DataAssistant\" class type\n\n Returns:\n Class inheriting \"DataAssistant\" if found; otherwise, None\n \"\"\"\n if name is None:\n return None\n name = name.lower()\n return cls._registered_data_assistants.get(name)\n\n def __dir__(self):\n \"\"\"\n This custom magic method is used to enable tab completion on \"DataAssistantDispatcher\" objects.\n \"\"\"\n data_assistant_dispatcher_attrs: Set[str] = set(super().__dir__())\n data_assistant_registered_names: Set[\n str\n ] = get_registered_data_assistant_names()\n combined_dir_attrs: Set[str] = (\n data_assistant_dispatcher_attrs | data_assistant_registered_names\n )\n return list(combined_dir_attrs)\n\n\ndef get_registered_data_assistant_names() -> Set[str]:\n \"\"\"\n This method returns names (registered data_assistant_type and alias name) of registered \"DataAssistant\" classes.\n \"\"\"\n 
return set(DataAssistantDispatcher._registered_data_assistants.keys())\n", "path": "great_expectations/rule_based_profiler/data_assistant/data_assistant_dispatcher.py"}]} | 1,374 | 625 |
gh_patches_debug_19029 | rasdani/github-patches | git_diff | pantsbuild__pants-11713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filename too long when invoking PEX with --pex-verbosity >=3
Any PEX-using run of Pants at 44017419d (PEX 2.1.33) with `--pex-verbosity=3` (i.e. `./pants test --pex-verbosity=3 src/python/pants/util/::`) fails reproducibly with a line like:
```
/Users/stuhood/.cache/pants/named_caches/pex_root/venvs/34bbe9579368d3ee50226ae26ab367a6fe827866/800ae8ef047bd783543e8282c22bfdbee7b7fca8/pex: File name too long
```
Notably: `--pex-verbosity=1` does not trigger the issue.
Capturing the sandbox of the run using `--no-process-execution-cleanup-local-dirs` allows for reproducing the issue with the `__run.sh` script (have captured a repro). The complete log of `./__run.sh` is attached.
[ftl.log](https://github.com/pantsbuild/pants/files/6127740/ftl.log)
----
The content of the `pants_plugins.pex_pex_shim.sh` file (attached as [pants_plugins.pex_pex_shim.sh.txt](https://github.com/pantsbuild/pants/files/6127756/pants_plugins.pex_pex_shim.sh.txt)) seems to indicate that some of the verbose output gets accidentally embedded in the generated shim script.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/util_rules/pex_cli.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import dataclasses
5 import os
6 from dataclasses import dataclass
7 from pathlib import Path
8 from typing import Iterable, List, Mapping, Optional, Tuple
9
10 from pants.backend.python.subsystems.python_native_code import PythonNativeCode
11 from pants.backend.python.util_rules import pex_environment
12 from pants.backend.python.util_rules.pex_environment import (
13 PexEnvironment,
14 PexRuntimeEnvironment,
15 PythonExecutable,
16 )
17 from pants.core.util_rules import external_tool
18 from pants.core.util_rules.external_tool import (
19 DownloadedExternalTool,
20 ExternalToolRequest,
21 TemplatedExternalTool,
22 )
23 from pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests
24 from pants.engine.internals.selectors import MultiGet
25 from pants.engine.platform import Platform
26 from pants.engine.process import Process, ProcessCacheScope
27 from pants.engine.rules import Get, collect_rules, rule
28 from pants.option.global_options import GlobalOptions
29 from pants.util.frozendict import FrozenDict
30 from pants.util.logging import LogLevel
31 from pants.util.meta import classproperty, frozen_after_init
32 from pants.util.strutil import create_path_env_var
33
34
35 class PexBinary(TemplatedExternalTool):
36 options_scope = "download-pex-bin"
37 name = "pex"
38 help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
39
40 default_version = "v2.1.33"
41 default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
42
43 @classproperty
44 def default_known_versions(cls):
45 return [
46 "|".join(
47 (
48 cls.default_version,
49 plat,
50 "7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064",
51 "3596348",
52 )
53 )
54 for plat in ["darwin", "linux"]
55 ]
56
57
58 @frozen_after_init
59 @dataclass(unsafe_hash=True)
60 class PexCliProcess:
61 argv: Tuple[str, ...]
62 description: str = dataclasses.field(compare=False)
63 additional_input_digest: Optional[Digest]
64 extra_env: Optional[FrozenDict[str, str]]
65 output_files: Optional[Tuple[str, ...]]
66 output_directories: Optional[Tuple[str, ...]]
67 python: Optional[PythonExecutable]
68 level: LogLevel
69 cache_scope: Optional[ProcessCacheScope]
70
71 def __init__(
72 self,
73 *,
74 argv: Iterable[str],
75 description: str,
76 additional_input_digest: Optional[Digest] = None,
77 extra_env: Optional[Mapping[str, str]] = None,
78 output_files: Optional[Iterable[str]] = None,
79 output_directories: Optional[Iterable[str]] = None,
80 python: Optional[PythonExecutable] = None,
81 level: LogLevel = LogLevel.INFO,
82 cache_scope: Optional[ProcessCacheScope] = None,
83 ) -> None:
84 self.argv = tuple(argv)
85 self.description = description
86 self.additional_input_digest = additional_input_digest
87 self.extra_env = FrozenDict(extra_env) if extra_env else None
88 self.output_files = tuple(output_files) if output_files else None
89 self.output_directories = tuple(output_directories) if output_directories else None
90 self.python = python
91 self.level = level
92 self.cache_scope = cache_scope
93 self.__post_init__()
94
95 def __post_init__(self) -> None:
96 if "--pex-root-path" in self.argv:
97 raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
98
99
100 @rule
101 async def setup_pex_cli_process(
102 request: PexCliProcess,
103 pex_binary: PexBinary,
104 pex_env: PexEnvironment,
105 python_native_code: PythonNativeCode,
106 global_options: GlobalOptions,
107 pex_runtime_env: PexRuntimeEnvironment,
108 ) -> Process:
109 tmpdir = ".tmp"
110 gets: List[Get] = [
111 Get(DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)),
112 Get(Digest, CreateDigest([Directory(tmpdir)])),
113 ]
114 cert_args = []
115
116 # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.
117 # Instead we manually create a FileContent for it.
118 if global_options.options.ca_certs_path:
119 ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()
120 chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)
121
122 gets.append(
123 Get(
124 Digest,
125 CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),
126 )
127 )
128 cert_args = ["--cert", chrooted_ca_certs_path]
129
130 downloaded_pex_bin, *digests_to_merge = await MultiGet(gets)
131 digests_to_merge.append(downloaded_pex_bin.digest)
132 if request.additional_input_digest:
133 digests_to_merge.append(request.additional_input_digest)
134 input_digest = await Get(Digest, MergeDigests(digests_to_merge))
135
136 pex_root_path = ".cache/pex_root"
137 argv = [
138 downloaded_pex_bin.exe,
139 *cert_args,
140 "--python-path",
141 create_path_env_var(pex_env.interpreter_search_paths),
142 "--pex-root",
143 pex_root_path,
144 # Ensure Pex and its subprocesses create temporary files in the the process execution
145 # sandbox. It may make sense to do this generally for Processes, but in the short term we
146 # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
147 # perform. Making the TMPDIR local to the sandbox allows control via
148 # --local-execution-root-dir for the local case and should work well with remote cases where
149 # a remoting implementation has to allow for processes producing large binaries in a
150 # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
151 # environment variable allows Pex to absolutize the path ensuring subprocesses that change
152 # CWD can find the TMPDIR.
153 "--tmpdir",
154 tmpdir,
155 ]
156 if pex_runtime_env.verbosity > 0:
157 argv.append(f"-{'v' * pex_runtime_env.verbosity}")
158
159 # NB: This comes at the end of the argv because the request may use `--` passthrough args,
160 # which must come at the end.
161 argv.extend(request.argv)
162 normalized_argv = pex_env.create_argv(*argv, python=request.python)
163 env = {
164 **pex_env.environment_dict(python_configured=request.python is not None),
165 **python_native_code.environment_dict,
166 **(request.extra_env or {}),
167 }
168
169 return Process(
170 normalized_argv,
171 description=request.description,
172 input_digest=input_digest,
173 env=env,
174 output_files=request.output_files,
175 output_directories=request.output_directories,
176 append_only_caches={"pex_root": pex_root_path},
177 level=request.level,
178 cache_scope=request.cache_scope,
179 )
180
181
182 def rules():
183 return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py
--- a/src/python/pants/backend/python/util_rules/pex_cli.py
+++ b/src/python/pants/backend/python/util_rules/pex_cli.py
@@ -37,7 +37,7 @@
name = "pex"
help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
- default_version = "v2.1.33"
+ default_version = "v2.1.34"
default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
@classproperty
@@ -47,8 +47,8 @@
(
cls.default_version,
plat,
- "7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064",
- "3596348",
+ "9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f",
+ "3597768",
)
)
for plat in ["darwin", "linux"]
| {"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py\n--- a/src/python/pants/backend/python/util_rules/pex_cli.py\n+++ b/src/python/pants/backend/python/util_rules/pex_cli.py\n@@ -37,7 +37,7 @@\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n \n- default_version = \"v2.1.33\"\n+ default_version = \"v2.1.34\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n \n @classproperty\n@@ -47,8 +47,8 @@\n (\n cls.default_version,\n plat,\n- \"7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064\",\n- \"3596348\",\n+ \"9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f\",\n+ \"3597768\",\n )\n )\n for plat in [\"darwin\", \"linux\"]\n", "issue": "Filename too long when invoking PEX with --pex-verbosity >=3\nAny PEX-using run of Pants at 44017419d (PEX 2.1.33) with `--pex-verbosity=3` (i.e. `./pants test --pex-verbosity=3 src/python/pants/util/::`) fails reproducibly with a line like:\r\n```\r\n/Users/stuhood/.cache/pants/named_caches/pex_root/venvs/34bbe9579368d3ee50226ae26ab367a6fe827866/800ae8ef047bd783543e8282c22bfdbee7b7fca8/pex: File name too long\r\n```\r\nNotably: `--pex-verbosity=1` does not trigger the issue.\r\n\r\nCapturing the sandbox of the run using `--no-process-execution-cleanup-local-dirs` allows for reproducing the issue with the `__run.sh` script (have captured a repro). The complete log of `./__run.sh` is attached.\r\n\r\n[ftl.log](https://github.com/pantsbuild/pants/files/6127740/ftl.log)\r\n\r\n----\r\n\r\nThe content of the `pants_plugins.pex_pex_shim.sh` file (attached as [pants_plugins.pex_pex_shim.sh.txt](https://github.com/pantsbuild/pants/files/6127756/pants_plugins.pex_pex_shim.sh.txt)) seems to indicate that some of the verbose output gets accidentally embedded in the generated shim script.\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport dataclasses\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCode\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import (\n PexEnvironment,\n PexRuntimeEnvironment,\n PythonExecutable,\n)\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty, frozen_after_init\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexBinary(TemplatedExternalTool):\n options_scope = \"download-pex-bin\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.33\"\n default_url_template = 
\"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"7f9f6168691fe83f38fbc248bd8629a152cf2a8833be1afdc06219f70fbb6064\",\n \"3596348\",\n )\n )\n for plat in [\"darwin\", \"linux\"]\n ]\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PexCliProcess:\n argv: Tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n python: Optional[PythonExecutable]\n level: LogLevel\n cache_scope: Optional[ProcessCacheScope]\n\n def __init__(\n self,\n *,\n argv: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n python: Optional[PythonExecutable] = None,\n level: LogLevel = LogLevel.INFO,\n cache_scope: Optional[ProcessCacheScope] = None,\n ) -> None:\n self.argv = tuple(argv)\n self.description = description\n self.additional_input_digest = additional_input_digest\n self.extra_env = FrozenDict(extra_env) if extra_env else None\n self.output_files = tuple(output_files) if output_files else None\n self.output_directories = tuple(output_directories) if output_directories else None\n self.python = python\n self.level = level\n self.cache_scope = cache_scope\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.argv:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_binary: PexBinary,\n pex_env: PexEnvironment,\n python_native_code: PythonNativeCode,\n global_options: GlobalOptions,\n pex_runtime_env: PexRuntimeEnvironment,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [\n Get(DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)),\n Get(Digest, CreateDigest([Directory(tmpdir)])),\n ]\n cert_args = []\n\n # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.\n # Instead we manually create a FileContent for it.\n if global_options.options.ca_certs_path:\n ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()\n chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)\n\n gets.append(\n Get(\n Digest,\n CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),\n )\n )\n cert_args = [\"--cert\", chrooted_ca_certs_path]\n\n downloaded_pex_bin, *digests_to_merge = await MultiGet(gets)\n digests_to_merge.append(downloaded_pex_bin.digest)\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n pex_root_path = \".cache/pex_root\"\n argv = [\n downloaded_pex_bin.exe,\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n \"--pex-root\",\n pex_root_path,\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. 
Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n if pex_runtime_env.verbosity > 0:\n argv.append(f\"-{'v' * pex_runtime_env.verbosity}\")\n\n # NB: This comes at the end of the argv because the request may use `--` passthrough args,\n # which must come at the end.\n argv.extend(request.argv)\n normalized_argv = pex_env.create_argv(*argv, python=request.python)\n env = {\n **pex_env.environment_dict(python_configured=request.python is not None),\n **python_native_code.environment_dict,\n **(request.extra_env or {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches={\"pex_root\": pex_root_path},\n level=request.level,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport dataclasses\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCode\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import (\n PexEnvironment,\n PexRuntimeEnvironment,\n PythonExecutable,\n)\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, FileContent, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty, frozen_after_init\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexBinary(TemplatedExternalTool):\n options_scope = \"download-pex-bin\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.34\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f\",\n \"3597768\",\n )\n )\n for plat in [\"darwin\", \"linux\"]\n ]\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PexCliProcess:\n argv: Tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: 
Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n python: Optional[PythonExecutable]\n level: LogLevel\n cache_scope: Optional[ProcessCacheScope]\n\n def __init__(\n self,\n *,\n argv: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n python: Optional[PythonExecutable] = None,\n level: LogLevel = LogLevel.INFO,\n cache_scope: Optional[ProcessCacheScope] = None,\n ) -> None:\n self.argv = tuple(argv)\n self.description = description\n self.additional_input_digest = additional_input_digest\n self.extra_env = FrozenDict(extra_env) if extra_env else None\n self.output_files = tuple(output_files) if output_files else None\n self.output_directories = tuple(output_directories) if output_directories else None\n self.python = python\n self.level = level\n self.cache_scope = cache_scope\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.argv:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_binary: PexBinary,\n pex_env: PexEnvironment,\n python_native_code: PythonNativeCode,\n global_options: GlobalOptions,\n pex_runtime_env: PexRuntimeEnvironment,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [\n Get(DownloadedExternalTool, ExternalToolRequest, pex_binary.get_request(Platform.current)),\n Get(Digest, CreateDigest([Directory(tmpdir)])),\n ]\n cert_args = []\n\n # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.\n # Instead we manually create a FileContent for it.\n if global_options.options.ca_certs_path:\n ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()\n chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)\n\n gets.append(\n Get(\n Digest,\n CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),\n )\n )\n cert_args = [\"--cert\", chrooted_ca_certs_path]\n\n downloaded_pex_bin, *digests_to_merge = await MultiGet(gets)\n digests_to_merge.append(downloaded_pex_bin.digest)\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n pex_root_path = \".cache/pex_root\"\n argv = [\n downloaded_pex_bin.exe,\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n \"--pex-root\",\n pex_root_path,\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. 
Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n if pex_runtime_env.verbosity > 0:\n argv.append(f\"-{'v' * pex_runtime_env.verbosity}\")\n\n # NB: This comes at the end of the argv because the request may use `--` passthrough args,\n # which must come at the end.\n argv.extend(request.argv)\n normalized_argv = pex_env.create_argv(*argv, python=request.python)\n env = {\n **pex_env.environment_dict(python_configured=request.python is not None),\n **python_native_code.environment_dict,\n **(request.extra_env or {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches={\"pex_root\": pex_root_path},\n level=request.level,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [*collect_rules(), *external_tool.rules(), *pex_environment.rules()]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}]} | 2,689 | 338 |
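The patch above only bumps the pinned PEX release from v2.1.33 to v2.1.34 and swaps in the matching artifact sha256 and size. As a reading aid, here is a minimal, self-contained sketch of how each `default_known_versions` entry is assembled from those four fields; the helper name is illustrative, and the values are the ones appearing in the diff.

```python
# Illustrative helper mirroring the "|".join(...) classproperty in PexBinary above.
def known_version_entry(version: str, plat: str, sha256: str, size: str) -> str:
    return "|".join((version, plat, sha256, size))

# Values taken from the golden diff (new PEX v2.1.34 artifact).
entry = known_version_entry(
    "v2.1.34",
    "linux",
    "9b1a959ccb61b3deb64ffeed43a735c7115e414f4de6f96e66adc9e7fc7a757f",
    "3597768",
)
print(entry)  # -> "v2.1.34|linux|9b1a...757f|3597768"
```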
gh_patches_debug_13415 | rasdani/github-patches | git_diff | AnalogJ__lexicon-106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
function update_record() in namesilo.py has the wrong query parameter
original: data['rdata'] = content
modified: data['rrvalue'] = content
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/namesilo.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import absolute_import
3 from .base import Provider as BaseProvider
4 import requests
5 from xml.etree import ElementTree
6
7 def ProviderParser(subparser):
8 subparser.add_argument("--auth-token", help="specify key used authenticate")
9
10
11 class Provider(BaseProvider):
12
13 def __init__(self, options, provider_options={}):
14 super(Provider, self).__init__(options)
15 self.domain_id = None
16 self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'
17
18 def authenticate(self):
19
20 payload = self._get('/getDomainInfo', {'domain': self.options['domain']})
21 self.domain_id = self.options['domain']
22
23
24 # Create record. If record already exists with the same content, do nothing'
25 def create_record(self, type, name, content):
26 record = {
27 'domain': self.domain_id,
28 'rrhost': self._relative_name(name),
29 'rrtype': type,
30 'rrvalue': content
31 }
32 if self.options.get('ttl'):
33 record['rrttl'] = self.options.get('ttl')
34 payload = self._get('/dnsAddRecord', record)
35 print('create_record: {0}'.format(True))
36 return True
37
38 # List all records. Return an empty list if no records found
39 # type, name and content are used to filter records.
40 # If possible filter during the query, otherwise filter after response is received.
41 def list_records(self, type=None, name=None, content=None):
42 query = {'domain': self.domain_id}
43
44 payload = self._get('/dnsListRecords', query)
45 records = []
46 for record in payload.find('reply').findall('resource_record'):
47 processed_record = {
48 'type': record.find('type').text,
49 'name': record.find('host').text,
50 'ttl': record.find('ttl').text,
51 'content': record.find('value').text,
52 'id': record.find('record_id').text
53 }
54 records.append(processed_record)
55
56 if type:
57 records = [record for record in records if record['type'] == type]
58 if name:
59 records = [record for record in records if record['name'] == self._full_name(name)]
60 if content:
61 records = [record for record in records if record['content'] == content]
62
63 print('list_records: {0}'.format(records))
64 return records
65
66 # Create or update a record.
67 def update_record(self, identifier, type=None, name=None, content=None):
68
69 data = {
70 'domain': self.domain_id,
71 'rrid': identifier
72 }
73 # if type:
74 # data['rtype'] = type
75 if name:
76 data['rrhost'] = self._relative_name(name)
77 if content:
78 data['rdata'] = content
79 if self.options.get('ttl'):
80 data['rrttl'] = self.options.get('ttl')
81
82 payload = self._get('/dnsUpdateRecord', data)
83
84 print('update_record: {0}'.format(True))
85 return True
86
87 # Delete an existing record.
88 # If record does not exist, do nothing.
89 def delete_record(self, identifier=None, type=None, name=None, content=None):
90 data = {
91 'domain': self.domain_id
92 }
93 if not identifier:
94 records = self.list_records(type, name, content)
95 print(records)
96 if len(records) == 1:
97 data['rrid'] = records[0]['id']
98 else:
99 raise Exception('Record identifier could not be found.')
100 else:
101 data['rrid'] = identifier
102 payload = self._get('/dnsDeleteRecord', data)
103
104 print('delete_record: {0}'.format(True))
105 return True
106
107
108 # Helpers
109 def _request(self, action='GET', url='/', data=None, query_params=None):
110 if data is None:
111 data = {}
112 if query_params is None:
113 query_params = {}
114 query_params['version'] = 1
115 query_params['type'] = 'xml'
116 query_params['key'] = self.options['auth_token']
117 r = requests.request(action, self.api_endpoint + url, params=query_params)
118 #data=json.dumps(data))
119 r.raise_for_status() # if the request fails for any reason, throw an error.
120 # TODO: check if the response is an error using
121 tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))
122 root = tree.getroot()
123 if root.find('reply').find('code').text != '300':
124 raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
125
126
127 return root
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/namesilo.py b/lexicon/providers/namesilo.py
--- a/lexicon/providers/namesilo.py
+++ b/lexicon/providers/namesilo.py
@@ -75,7 +75,7 @@
if name:
data['rrhost'] = self._relative_name(name)
if content:
- data['rdata'] = content
+ data['rrvalue'] = content
if self.options.get('ttl'):
data['rrttl'] = self.options.get('ttl')
@@ -124,4 +124,4 @@
raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))
- return root
\ No newline at end of file
+ return root
| {"golden_diff": "diff --git a/lexicon/providers/namesilo.py b/lexicon/providers/namesilo.py\n--- a/lexicon/providers/namesilo.py\n+++ b/lexicon/providers/namesilo.py\n@@ -75,7 +75,7 @@\n if name:\n data['rrhost'] = self._relative_name(name)\n if content:\n- data['rdata'] = content\n+ data['rrvalue'] = content\n if self.options.get('ttl'):\n data['rrttl'] = self.options.get('ttl')\n \n@@ -124,4 +124,4 @@\n raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))\n \n \n- return root\n\\ No newline at end of file\n+ return root\n", "issue": "function update_record() in namesilo.py has wrong query parameter\norigin: data['rdata'] = content\r\nmodified: data['rrvalue'] = content\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .base import Provider as BaseProvider\nimport requests\nfrom xml.etree import ElementTree\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify key used authenticate\")\n\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, provider_options={}):\n super(Provider, self).__init__(options)\n self.domain_id = None\n self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'\n\n def authenticate(self):\n\n payload = self._get('/getDomainInfo', {'domain': self.options['domain']})\n self.domain_id = self.options['domain']\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n record = {\n 'domain': self.domain_id,\n 'rrhost': self._relative_name(name),\n 'rrtype': type,\n 'rrvalue': content\n }\n if self.options.get('ttl'):\n record['rrttl'] = self.options.get('ttl')\n payload = self._get('/dnsAddRecord', record)\n print('create_record: {0}'.format(True))\n return True\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def list_records(self, type=None, name=None, content=None):\n query = {'domain': self.domain_id}\n\n payload = self._get('/dnsListRecords', query)\n records = []\n for record in payload.find('reply').findall('resource_record'):\n processed_record = {\n 'type': record.find('type').text,\n 'name': record.find('host').text,\n 'ttl': record.find('ttl').text,\n 'content': record.find('value').text,\n 'id': record.find('record_id').text\n }\n records.append(processed_record)\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n records = [record for record in records if record['name'] == self._full_name(name)]\n if content:\n records = [record for record in records if record['content'] == content]\n\n print('list_records: {0}'.format(records))\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n\n data = {\n 'domain': self.domain_id,\n 'rrid': identifier\n }\n # if type:\n # data['rtype'] = type\n if name:\n data['rrhost'] = self._relative_name(name)\n if content:\n data['rdata'] = content\n if self.options.get('ttl'):\n data['rrttl'] = self.options.get('ttl')\n\n payload = self._get('/dnsUpdateRecord', data)\n\n print('update_record: {0}'.format(True))\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n data = {\n 'domain': self.domain_id\n }\n if not identifier:\n records = self.list_records(type, name, content)\n print(records)\n if len(records) == 1:\n data['rrid'] = records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n else:\n data['rrid'] = identifier\n payload = self._get('/dnsDeleteRecord', data)\n\n print('delete_record: {0}'.format(True))\n return True\n\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n query_params['version'] = 1\n query_params['type'] = 'xml'\n query_params['key'] = self.options['auth_token']\n r = requests.request(action, self.api_endpoint + url, params=query_params)\n #data=json.dumps(data))\n r.raise_for_status() # if the request fails for any reason, throw an error.\n # TODO: check if the response is an error using\n tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))\n root = tree.getroot()\n if root.find('reply').find('code').text != '300':\n raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))\n\n\n return root", "path": "lexicon/providers/namesilo.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .base import Provider as BaseProvider\nimport requests\nfrom xml.etree import ElementTree\n\ndef ProviderParser(subparser):\n subparser.add_argument(\"--auth-token\", help=\"specify key used authenticate\")\n\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, provider_options={}):\n super(Provider, self).__init__(options)\n self.domain_id = None\n self.api_endpoint = provider_options.get('api_endpoint') or 'https://www.namesilo.com/api'\n\n def authenticate(self):\n\n payload = self._get('/getDomainInfo', {'domain': 
self.options['domain']})\n self.domain_id = self.options['domain']\n\n\n # Create record. If record already exists with the same content, do nothing'\n def create_record(self, type, name, content):\n record = {\n 'domain': self.domain_id,\n 'rrhost': self._relative_name(name),\n 'rrtype': type,\n 'rrvalue': content\n }\n if self.options.get('ttl'):\n record['rrttl'] = self.options.get('ttl')\n payload = self._get('/dnsAddRecord', record)\n print('create_record: {0}'.format(True))\n return True\n\n # List all records. Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def list_records(self, type=None, name=None, content=None):\n query = {'domain': self.domain_id}\n\n payload = self._get('/dnsListRecords', query)\n records = []\n for record in payload.find('reply').findall('resource_record'):\n processed_record = {\n 'type': record.find('type').text,\n 'name': record.find('host').text,\n 'ttl': record.find('ttl').text,\n 'content': record.find('value').text,\n 'id': record.find('record_id').text\n }\n records.append(processed_record)\n\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n records = [record for record in records if record['name'] == self._full_name(name)]\n if content:\n records = [record for record in records if record['content'] == content]\n\n print('list_records: {0}'.format(records))\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n\n data = {\n 'domain': self.domain_id,\n 'rrid': identifier\n }\n # if type:\n # data['rtype'] = type\n if name:\n data['rrhost'] = self._relative_name(name)\n if content:\n data['rrvalue'] = content\n if self.options.get('ttl'):\n data['rrttl'] = self.options.get('ttl')\n\n payload = self._get('/dnsUpdateRecord', data)\n\n print('update_record: {0}'.format(True))\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n data = {\n 'domain': self.domain_id\n }\n if not identifier:\n records = self.list_records(type, name, content)\n print(records)\n if len(records) == 1:\n data['rrid'] = records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n else:\n data['rrid'] = identifier\n payload = self._get('/dnsDeleteRecord', data)\n\n print('delete_record: {0}'.format(True))\n return True\n\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n query_params['version'] = 1\n query_params['type'] = 'xml'\n query_params['key'] = self.options['auth_token']\n r = requests.request(action, self.api_endpoint + url, params=query_params)\n #data=json.dumps(data))\n r.raise_for_status() # if the request fails for any reason, throw an error.\n # TODO: check if the response is an error using\n tree = ElementTree.ElementTree(ElementTree.fromstring(r.content))\n root = tree.getroot()\n if root.find('reply').find('code').text != '300':\n raise Exception('An error occurred: {0}, {1}'.format(root.find('reply').find('detail').text, root.find('reply').find('code').text))\n\n\n return root\n", "path": "lexicon/providers/namesilo.py"}]} | 1,618 | 189 |
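The namesilo fix above is a one-line change inside `update_record()`: the new record value must be sent as `rrvalue` (the same parameter name `create_record()` already uses), not `rdata`. A small standalone sketch of the corrected payload construction follows; the function and the sample values are hypothetical and only illustrate the parameter names.

```python
# Hypothetical helper mirroring the fixed update_record() payload; values are examples.
def build_update_params(domain, identifier, name=None, content=None, ttl=None):
    data = {"domain": domain, "rrid": identifier}
    if name:
        data["rrhost"] = name
    if content:
        data["rrvalue"] = content  # fixed: the buggy version populated "rdata" instead
    if ttl:
        data["rrttl"] = ttl
    return data

print(build_update_params("example.com", "12345", name="www", content="203.0.113.7", ttl=3600))
```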
gh_patches_debug_21013 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
language settings don't persist past browser sessions
**Describe the bug**
Language settings don't persist past browser sessions. If I change my language settings and close my browser without logging out, the language has reverted to English when I open the browser again. However, if I log out and log back in, bookwyrm is in the language I specified.
**To Reproduce**
Steps to reproduce the behavior:
(assuming your bookwyrm is already in English)
1. Go to https://bookwyrm.social/preferences/profile
2. Select a language other than English under the Language setting and Save
3. Close the browser window and reopen bookwyrm
4. See that bookwyrm is in English and not the language you selected
**Expected behavior**
I would expect that my language settings are persistent across browser sessions.
**Instance**
bookwyrm.social
**Additional Context**
I have been able to reproduce this bug on Safari and Brave on macOS Big Sur.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/helpers.py`
Content:
```
1 """ helper functions used in various views """
2 import re
3 from datetime import datetime
4 import dateutil.parser
5 import dateutil.tz
6 from dateutil.parser import ParserError
7
8 from requests import HTTPError
9 from django.db.models import Q
10 from django.http import Http404
11 from django.utils import translation
12
13 from bookwyrm import activitypub, models, settings
14 from bookwyrm.connectors import ConnectorException, get_data
15 from bookwyrm.status import create_generated_note
16 from bookwyrm.utils import regex
17
18
19 # pylint: disable=unnecessary-pass
20 class WebFingerError(Exception):
21 """empty error class for problems finding user information with webfinger"""
22
23 pass
24
25
26 def get_user_from_username(viewer, username):
27 """helper function to resolve a localname or a username to a user"""
28 if viewer.is_authenticated and viewer.localname == username:
29 # that's yourself, fool
30 return viewer
31
32 # raises 404 if the user isn't found
33 try:
34 return models.User.viewer_aware_objects(viewer).get(localname=username)
35 except models.User.DoesNotExist:
36 pass
37
38 # if the localname didn't match, try the username
39 try:
40 return models.User.viewer_aware_objects(viewer).get(username=username)
41 except models.User.DoesNotExist:
42 raise Http404()
43
44
45 def is_api_request(request):
46 """check whether a request is asking for html or data"""
47 return "json" in request.headers.get("Accept", "") or re.match(
48 r".*\.json/?$", request.path
49 )
50
51
52 def is_bookwyrm_request(request):
53 """check if the request is coming from another bookwyrm instance"""
54 user_agent = request.headers.get("User-Agent")
55 if user_agent is None or re.search(regex.BOOKWYRM_USER_AGENT, user_agent) is None:
56 return False
57 return True
58
59
60 def handle_remote_webfinger(query):
61 """webfingerin' other servers"""
62 user = None
63
64 # usernames could be @user@domain or user@domain
65 if not query:
66 return None
67 if query[0] == "@":
68 query = query[1:]
69 try:
70 domain = query.split("@")[1]
71 except IndexError:
72 return None
73
74 try:
75 user = models.User.objects.get(username__iexact=query)
76 except models.User.DoesNotExist:
77 url = f"https://{domain}/.well-known/webfinger?resource=acct:{query}"
78 try:
79 data = get_data(url)
80 except (ConnectorException, HTTPError):
81 return None
82
83 for link in data.get("links"):
84 if link.get("rel") == "self":
85 try:
86 user = activitypub.resolve_remote_id(
87 link["href"], model=models.User
88 )
89 except (KeyError, activitypub.ActivitySerializerError):
90 return None
91 return user
92
93
94 def subscribe_remote_webfinger(query):
95 """get subscribe template from other servers"""
96 template = None
97 # usernames could be @user@domain or user@domain
98 if not query:
99 return WebFingerError("invalid_username")
100
101 if query[0] == "@":
102 query = query[1:]
103
104 try:
105 domain = query.split("@")[1]
106 except IndexError:
107 return WebFingerError("invalid_username")
108
109 url = f"https://{domain}/.well-known/webfinger?resource=acct:{query}"
110
111 try:
112 data = get_data(url)
113 except (ConnectorException, HTTPError):
114 return WebFingerError("user_not_found")
115
116 for link in data.get("links"):
117 if link.get("rel") == "http://ostatus.org/schema/1.0/subscribe":
118 template = link["template"]
119
120 return template
121
122
123 def get_edition(book_id):
124 """look up a book in the db and return an edition"""
125 book = models.Book.objects.select_subclasses().get(id=book_id)
126 if isinstance(book, models.Work):
127 book = book.default_edition
128 return book
129
130
131 def handle_reading_status(user, shelf, book, privacy):
132 """post about a user reading a book"""
133 # tell the world about this cool thing that happened
134 try:
135 message = {
136 "to-read": "wants to read",
137 "reading": "started reading",
138 "read": "finished reading",
139 }[shelf.identifier]
140 except KeyError:
141 # it's a non-standard shelf, don't worry about it
142 return
143
144 status = create_generated_note(user, message, mention_books=[book], privacy=privacy)
145 status.save()
146
147
148 def is_blocked(viewer, user):
149 """is this viewer blocked by the user?"""
150 if viewer.is_authenticated and viewer in user.blocks.all():
151 return True
152 return False
153
154
155 def get_landing_books():
156 """list of books for the landing page"""
157
158 return list(
159 set(
160 models.Edition.objects.filter(
161 review__published_date__isnull=False,
162 review__deleted=False,
163 review__user__local=True,
164 review__privacy__in=["public", "unlisted"],
165 )
166 .exclude(cover__exact="")
167 .distinct()
168 .order_by("-review__published_date")[:6]
169 )
170 )
171
172
173 def load_date_in_user_tz_as_utc(date_str: str, user: models.User) -> datetime:
174 """ensures that data is stored consistently in the UTC timezone"""
175 if not date_str:
176 return None
177 user_tz = dateutil.tz.gettz(user.preferred_timezone)
178 date = dateutil.parser.parse(date_str, ignoretz=True)
179 try:
180 return date.replace(tzinfo=user_tz).astimezone(dateutil.tz.UTC)
181 except ParserError:
182 return None
183
184
185 def set_language(user, response):
186 """Updates a user's language"""
187 if user.preferred_language:
188 translation.activate(user.preferred_language)
189 response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)
190 return response
191
192
193 def filter_stream_by_status_type(activities, allowed_types=None):
194 """filter out activities based on types"""
195 if not allowed_types:
196 allowed_types = []
197
198 if "review" not in allowed_types:
199 activities = activities.filter(
200 Q(review__isnull=True), Q(boost__boosted_status__review__isnull=True)
201 )
202 if "comment" not in allowed_types:
203 activities = activities.filter(
204 Q(comment__isnull=True), Q(boost__boosted_status__comment__isnull=True)
205 )
206 if "quotation" not in allowed_types:
207 activities = activities.filter(
208 Q(quotation__isnull=True), Q(boost__boosted_status__quotation__isnull=True)
209 )
210 if "everything" not in allowed_types:
211 activities = activities.filter(
212 Q(generatednote__isnull=True),
213 Q(boost__boosted_status__generatednote__isnull=True),
214 )
215
216 return activities
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/helpers.py b/bookwyrm/views/helpers.py
--- a/bookwyrm/views/helpers.py
+++ b/bookwyrm/views/helpers.py
@@ -1,12 +1,13 @@
""" helper functions used in various views """
import re
-from datetime import datetime
+from datetime import datetime, timedelta
import dateutil.parser
import dateutil.tz
from dateutil.parser import ParserError
from requests import HTTPError
from django.db.models import Q
+from django.conf import settings as django_settings
from django.http import Http404
from django.utils import translation
@@ -186,7 +187,11 @@
"""Updates a user's language"""
if user.preferred_language:
translation.activate(user.preferred_language)
- response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)
+ response.set_cookie(
+ settings.LANGUAGE_COOKIE_NAME,
+ user.preferred_language,
+ expires=datetime.now() + timedelta(seconds=django_settings.SESSION_COOKIE_AGE),
+ )
return response
| {"golden_diff": "diff --git a/bookwyrm/views/helpers.py b/bookwyrm/views/helpers.py\n--- a/bookwyrm/views/helpers.py\n+++ b/bookwyrm/views/helpers.py\n@@ -1,12 +1,13 @@\n \"\"\" helper functions used in various views \"\"\"\n import re\n-from datetime import datetime\n+from datetime import datetime, timedelta\n import dateutil.parser\n import dateutil.tz\n from dateutil.parser import ParserError\n \n from requests import HTTPError\n from django.db.models import Q\n+from django.conf import settings as django_settings\n from django.http import Http404\n from django.utils import translation\n \n@@ -186,7 +187,11 @@\n \"\"\"Updates a user's language\"\"\"\n if user.preferred_language:\n translation.activate(user.preferred_language)\n- response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)\n+ response.set_cookie(\n+ settings.LANGUAGE_COOKIE_NAME,\n+ user.preferred_language,\n+ expires=datetime.now() + timedelta(seconds=django_settings.SESSION_COOKIE_AGE),\n+ )\n return response\n", "issue": "language settings don't persist past browser sessions\n**Describe the bug**\r\nLanguage settings don't persist past browser sessions. If I change my language settings, and close my browser without logging out, when I open the browser again, the language has reverted to English. However, if I logout and log back in, bookwyrm is in the language I specified. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n( assuming your bookwyrm is already in English)\r\n1. Go to https://bookwyrm.social/preferences/profile\r\n2. Select a language other than English under the Language setting and Save\r\n3. Close the browser window and reopen bookwyrm\r\n4. See that bookwyrm is in English and not the language you selected\r\n\r\n**Expected behavior**\r\nI would expect that my language settings are persistent across browser sessions. 
\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n**Additional Context**\r\nI have been able to repro this bug on Safari and Brave on macOS BigSur\n", "before_files": [{"content": "\"\"\" helper functions used in various views \"\"\"\nimport re\nfrom datetime import datetime\nimport dateutil.parser\nimport dateutil.tz\nfrom dateutil.parser import ParserError\n\nfrom requests import HTTPError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.utils import translation\n\nfrom bookwyrm import activitypub, models, settings\nfrom bookwyrm.connectors import ConnectorException, get_data\nfrom bookwyrm.status import create_generated_note\nfrom bookwyrm.utils import regex\n\n\n# pylint: disable=unnecessary-pass\nclass WebFingerError(Exception):\n \"\"\"empty error class for problems finding user information with webfinger\"\"\"\n\n pass\n\n\ndef get_user_from_username(viewer, username):\n \"\"\"helper function to resolve a localname or a username to a user\"\"\"\n if viewer.is_authenticated and viewer.localname == username:\n # that's yourself, fool\n return viewer\n\n # raises 404 if the user isn't found\n try:\n return models.User.viewer_aware_objects(viewer).get(localname=username)\n except models.User.DoesNotExist:\n pass\n\n # if the localname didn't match, try the username\n try:\n return models.User.viewer_aware_objects(viewer).get(username=username)\n except models.User.DoesNotExist:\n raise Http404()\n\n\ndef is_api_request(request):\n \"\"\"check whether a request is asking for html or data\"\"\"\n return \"json\" in request.headers.get(\"Accept\", \"\") or re.match(\n r\".*\\.json/?$\", request.path\n )\n\n\ndef is_bookwyrm_request(request):\n \"\"\"check if the request is coming from another bookwyrm instance\"\"\"\n user_agent = request.headers.get(\"User-Agent\")\n if user_agent is None or re.search(regex.BOOKWYRM_USER_AGENT, user_agent) is None:\n return False\n return True\n\n\ndef handle_remote_webfinger(query):\n \"\"\"webfingerin' other servers\"\"\"\n user = None\n\n # usernames could be @user@domain or user@domain\n if not query:\n return None\n if query[0] == \"@\":\n query = query[1:]\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return None\n\n try:\n user = models.User.objects.get(username__iexact=query)\n except models.User.DoesNotExist:\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return None\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"self\":\n try:\n user = activitypub.resolve_remote_id(\n link[\"href\"], model=models.User\n )\n except (KeyError, activitypub.ActivitySerializerError):\n return None\n return user\n\n\ndef subscribe_remote_webfinger(query):\n \"\"\"get subscribe template from other servers\"\"\"\n template = None\n # usernames could be @user@domain or user@domain\n if not query:\n return WebFingerError(\"invalid_username\")\n\n if query[0] == \"@\":\n query = query[1:]\n\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return WebFingerError(\"invalid_username\")\n\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return WebFingerError(\"user_not_found\")\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"http://ostatus.org/schema/1.0/subscribe\":\n template = link[\"template\"]\n\n return template\n\n\ndef get_edition(book_id):\n \"\"\"look up a book in the db and 
return an edition\"\"\"\n book = models.Book.objects.select_subclasses().get(id=book_id)\n if isinstance(book, models.Work):\n book = book.default_edition\n return book\n\n\ndef handle_reading_status(user, shelf, book, privacy):\n \"\"\"post about a user reading a book\"\"\"\n # tell the world about this cool thing that happened\n try:\n message = {\n \"to-read\": \"wants to read\",\n \"reading\": \"started reading\",\n \"read\": \"finished reading\",\n }[shelf.identifier]\n except KeyError:\n # it's a non-standard shelf, don't worry about it\n return\n\n status = create_generated_note(user, message, mention_books=[book], privacy=privacy)\n status.save()\n\n\ndef is_blocked(viewer, user):\n \"\"\"is this viewer blocked by the user?\"\"\"\n if viewer.is_authenticated and viewer in user.blocks.all():\n return True\n return False\n\n\ndef get_landing_books():\n \"\"\"list of books for the landing page\"\"\"\n\n return list(\n set(\n models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__deleted=False,\n review__user__local=True,\n review__privacy__in=[\"public\", \"unlisted\"],\n )\n .exclude(cover__exact=\"\")\n .distinct()\n .order_by(\"-review__published_date\")[:6]\n )\n )\n\n\ndef load_date_in_user_tz_as_utc(date_str: str, user: models.User) -> datetime:\n \"\"\"ensures that data is stored consistently in the UTC timezone\"\"\"\n if not date_str:\n return None\n user_tz = dateutil.tz.gettz(user.preferred_timezone)\n date = dateutil.parser.parse(date_str, ignoretz=True)\n try:\n return date.replace(tzinfo=user_tz).astimezone(dateutil.tz.UTC)\n except ParserError:\n return None\n\n\ndef set_language(user, response):\n \"\"\"Updates a user's language\"\"\"\n if user.preferred_language:\n translation.activate(user.preferred_language)\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, user.preferred_language)\n return response\n\n\ndef filter_stream_by_status_type(activities, allowed_types=None):\n \"\"\"filter out activities based on types\"\"\"\n if not allowed_types:\n allowed_types = []\n\n if \"review\" not in allowed_types:\n activities = activities.filter(\n Q(review__isnull=True), Q(boost__boosted_status__review__isnull=True)\n )\n if \"comment\" not in allowed_types:\n activities = activities.filter(\n Q(comment__isnull=True), Q(boost__boosted_status__comment__isnull=True)\n )\n if \"quotation\" not in allowed_types:\n activities = activities.filter(\n Q(quotation__isnull=True), Q(boost__boosted_status__quotation__isnull=True)\n )\n if \"everything\" not in allowed_types:\n activities = activities.filter(\n Q(generatednote__isnull=True),\n Q(boost__boosted_status__generatednote__isnull=True),\n )\n\n return activities\n", "path": "bookwyrm/views/helpers.py"}], "after_files": [{"content": "\"\"\" helper functions used in various views \"\"\"\nimport re\nfrom datetime import datetime, timedelta\nimport dateutil.parser\nimport dateutil.tz\nfrom dateutil.parser import ParserError\n\nfrom requests import HTTPError\nfrom django.db.models import Q\nfrom django.conf import settings as django_settings\nfrom django.http import Http404\nfrom django.utils import translation\n\nfrom bookwyrm import activitypub, models, settings\nfrom bookwyrm.connectors import ConnectorException, get_data\nfrom bookwyrm.status import create_generated_note\nfrom bookwyrm.utils import regex\n\n\n# pylint: disable=unnecessary-pass\nclass WebFingerError(Exception):\n \"\"\"empty error class for problems finding user information with webfinger\"\"\"\n\n pass\n\n\ndef 
get_user_from_username(viewer, username):\n \"\"\"helper function to resolve a localname or a username to a user\"\"\"\n if viewer.is_authenticated and viewer.localname == username:\n # that's yourself, fool\n return viewer\n\n # raises 404 if the user isn't found\n try:\n return models.User.viewer_aware_objects(viewer).get(localname=username)\n except models.User.DoesNotExist:\n pass\n\n # if the localname didn't match, try the username\n try:\n return models.User.viewer_aware_objects(viewer).get(username=username)\n except models.User.DoesNotExist:\n raise Http404()\n\n\ndef is_api_request(request):\n \"\"\"check whether a request is asking for html or data\"\"\"\n return \"json\" in request.headers.get(\"Accept\", \"\") or re.match(\n r\".*\\.json/?$\", request.path\n )\n\n\ndef is_bookwyrm_request(request):\n \"\"\"check if the request is coming from another bookwyrm instance\"\"\"\n user_agent = request.headers.get(\"User-Agent\")\n if user_agent is None or re.search(regex.BOOKWYRM_USER_AGENT, user_agent) is None:\n return False\n return True\n\n\ndef handle_remote_webfinger(query):\n \"\"\"webfingerin' other servers\"\"\"\n user = None\n\n # usernames could be @user@domain or user@domain\n if not query:\n return None\n if query[0] == \"@\":\n query = query[1:]\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return None\n\n try:\n user = models.User.objects.get(username__iexact=query)\n except models.User.DoesNotExist:\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return None\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"self\":\n try:\n user = activitypub.resolve_remote_id(\n link[\"href\"], model=models.User\n )\n except (KeyError, activitypub.ActivitySerializerError):\n return None\n return user\n\n\ndef subscribe_remote_webfinger(query):\n \"\"\"get subscribe template from other servers\"\"\"\n template = None\n # usernames could be @user@domain or user@domain\n if not query:\n return WebFingerError(\"invalid_username\")\n\n if query[0] == \"@\":\n query = query[1:]\n\n try:\n domain = query.split(\"@\")[1]\n except IndexError:\n return WebFingerError(\"invalid_username\")\n\n url = f\"https://{domain}/.well-known/webfinger?resource=acct:{query}\"\n\n try:\n data = get_data(url)\n except (ConnectorException, HTTPError):\n return WebFingerError(\"user_not_found\")\n\n for link in data.get(\"links\"):\n if link.get(\"rel\") == \"http://ostatus.org/schema/1.0/subscribe\":\n template = link[\"template\"]\n\n return template\n\n\ndef get_edition(book_id):\n \"\"\"look up a book in the db and return an edition\"\"\"\n book = models.Book.objects.select_subclasses().get(id=book_id)\n if isinstance(book, models.Work):\n book = book.default_edition\n return book\n\n\ndef handle_reading_status(user, shelf, book, privacy):\n \"\"\"post about a user reading a book\"\"\"\n # tell the world about this cool thing that happened\n try:\n message = {\n \"to-read\": \"wants to read\",\n \"reading\": \"started reading\",\n \"read\": \"finished reading\",\n }[shelf.identifier]\n except KeyError:\n # it's a non-standard shelf, don't worry about it\n return\n\n status = create_generated_note(user, message, mention_books=[book], privacy=privacy)\n status.save()\n\n\ndef is_blocked(viewer, user):\n \"\"\"is this viewer blocked by the user?\"\"\"\n if viewer.is_authenticated and viewer in user.blocks.all():\n return True\n return False\n\n\ndef get_landing_books():\n 
\"\"\"list of books for the landing page\"\"\"\n\n return list(\n set(\n models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__deleted=False,\n review__user__local=True,\n review__privacy__in=[\"public\", \"unlisted\"],\n )\n .exclude(cover__exact=\"\")\n .distinct()\n .order_by(\"-review__published_date\")[:6]\n )\n )\n\n\ndef load_date_in_user_tz_as_utc(date_str: str, user: models.User) -> datetime:\n \"\"\"ensures that data is stored consistently in the UTC timezone\"\"\"\n if not date_str:\n return None\n user_tz = dateutil.tz.gettz(user.preferred_timezone)\n date = dateutil.parser.parse(date_str, ignoretz=True)\n try:\n return date.replace(tzinfo=user_tz).astimezone(dateutil.tz.UTC)\n except ParserError:\n return None\n\n\ndef set_language(user, response):\n \"\"\"Updates a user's language\"\"\"\n if user.preferred_language:\n translation.activate(user.preferred_language)\n response.set_cookie(\n settings.LANGUAGE_COOKIE_NAME,\n user.preferred_language,\n expires=datetime.now() + timedelta(seconds=django_settings.SESSION_COOKIE_AGE),\n )\n return response\n\n\ndef filter_stream_by_status_type(activities, allowed_types=None):\n \"\"\"filter out activities based on types\"\"\"\n if not allowed_types:\n allowed_types = []\n\n if \"review\" not in allowed_types:\n activities = activities.filter(\n Q(review__isnull=True), Q(boost__boosted_status__review__isnull=True)\n )\n if \"comment\" not in allowed_types:\n activities = activities.filter(\n Q(comment__isnull=True), Q(boost__boosted_status__comment__isnull=True)\n )\n if \"quotation\" not in allowed_types:\n activities = activities.filter(\n Q(quotation__isnull=True), Q(boost__boosted_status__quotation__isnull=True)\n )\n if \"everything\" not in allowed_types:\n activities = activities.filter(\n Q(generatednote__isnull=True),\n Q(boost__boosted_status__generatednote__isnull=True),\n )\n\n return activities\n", "path": "bookwyrm/views/helpers.py"}]} | 2,499 | 229 |
gh_patches_debug_16041 | rasdani/github-patches | git_diff | pypa__setuptools-3709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] AttributeError: module 'distutils.log' has no attribute 'warning'
### setuptools version
setuptools==65.6.2
### Python version
Python 3.8.15
### OS
macOS 12.5.1
### Additional environment information
_No response_
### Description
Installing the package `kappa==0.6.0` doesn't work. I think it could be an issue on setuptools's side.
### Expected behavior
Installing the package works.
### How to Reproduce
```console
$ python3.8 -m venv .venv
$ source .venv/bin/activate
$ pip install setuptools==65.6.2
$ pip install wheel
$ pip install kappa==0.6.0
```
### Output
```console
(.venv) dmytro@Dmytros-MacBook-Pro install_kappa % pip install kappa==0.6.0
Collecting kappa==0.6.0
Using cached kappa-0.6.0.tar.gz (29 kB)
Preparing metadata (setup.py) ... error
error: subprocess-exited-with-error
× python setup.py egg_info did not run successfully.
│ exit code: 1
╰─> [20 lines of output]
Traceback (most recent call last):
File "<string>", line 2, in <module>
File "<pip-setuptools-caller>", line 34, in <module>
File "/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py", line 54, in <module>
run_setup()
File "/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py", line 18, in run_setup
setup(
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/__init__.py", line 87, in setup
return distutils.core.setup(**attrs)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/core.py", line 147, in setup
_setup_distribution = dist = klass(attrs)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/dist.py", line 475, in __init__
_Distribution.__init__(
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 258, in __init__
getattr(self.metadata, "set_" + key)(val)
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 1242, in set_classifiers
self.classifiers = _ensure_list(value, 'classifiers')
File "/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py", line 48, in _ensure_list
log.warning(msg)
AttributeError: module 'distutils.log' has no attribute 'warning'
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
× Encountered error while generating package metadata.
╰─> See above for output.
note: This is an issue with the package mentioned above, not pip.
hint: See above for details.
```
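The AttributeError shows that the `log` object reached from `distutils/dist.py` has no `warning` callable, which suggests the `log` reference cached inside `distutils.dist` is not the module that setuptools expects to have patched. A minimal, illustrative sketch of the guard idea (the same `inspect.ismodule` check that appears in the patch further down this entry) follows; the surrounding function is an assumption for illustration, not the actual setuptools code.

```python
# Illustrative sketch only, not the actual setuptools code: the idea is to
# touch distutils.dist.log only while it is still a plain module object, so
# that rebinding it cannot leave dist.py calling a log object that lacks
# the expected helpers such as warning().
import inspect
import distutils.dist
import distutils.log


def safe_to_rebind_distutils_log():
    # True only when nothing has replaced the cached module reference.
    return inspect.ismodule(distutils.dist.log)


if safe_to_rebind_distutils_log():
    distutils.dist.log = distutils.log  # keep both names on the same module
```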
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/logging.py`
Content:
```
1 import sys
2 import logging
3 import distutils.log
4 from . import monkey
5
6
7 def _not_warning(record):
8 return record.levelno < logging.WARNING
9
10
11 def configure():
12 """
13 Configure logging to emit warning and above to stderr
14 and everything else to stdout. This behavior is provided
15 for compatibility with distutils.log but may change in
16 the future.
17 """
18 err_handler = logging.StreamHandler()
19 err_handler.setLevel(logging.WARNING)
20 out_handler = logging.StreamHandler(sys.stdout)
21 out_handler.addFilter(_not_warning)
22 handlers = err_handler, out_handler
23 logging.basicConfig(
24 format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
25 if hasattr(distutils.log, 'Log'):
26 monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
27 # For some reason `distutils.log` module is getting cached in `distutils.dist`
28 # and then loaded again when patched,
29 # implying: id(distutils.log) != id(distutils.dist.log).
30 # Make sure the same module object is used everywhere:
31 distutils.dist.log = distutils.log
32
33
34 def set_threshold(level):
35 logging.root.setLevel(level*10)
36 return set_threshold.unpatched(level)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/logging.py b/setuptools/logging.py
--- a/setuptools/logging.py
+++ b/setuptools/logging.py
@@ -1,4 +1,5 @@
import sys
+import inspect
import logging
import distutils.log
from . import monkey
@@ -22,7 +23,7 @@
handlers = err_handler, out_handler
logging.basicConfig(
format="{message}", style='{', handlers=handlers, level=logging.DEBUG)
- if hasattr(distutils.log, 'Log'):
+ if inspect.ismodule(distutils.dist.log):
monkey.patch_func(set_threshold, distutils.log, 'set_threshold')
# For some reason `distutils.log` module is getting cached in `distutils.dist`
# and then loaded again when patched,
| {"golden_diff": "diff --git a/setuptools/logging.py b/setuptools/logging.py\n--- a/setuptools/logging.py\n+++ b/setuptools/logging.py\n@@ -1,4 +1,5 @@\n import sys\n+import inspect\n import logging\n import distutils.log\n from . import monkey\n@@ -22,7 +23,7 @@\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n- if hasattr(distutils.log, 'Log'):\n+ if inspect.ismodule(distutils.dist.log):\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n # For some reason `distutils.log` module is getting cached in `distutils.dist`\n # and then loaded again when patched,\n", "issue": "[BUG] AttributeError: module 'distutils.log' has no attribute 'warning'\n### setuptools version\n\nsetuptools==65.6.2\n\n### Python version\n\nPython 3.8.15\n\n### OS\n\nmacOS 12.5.1\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nInstalling a package `kappa=0.6.0` doesn't work. I think it could be the issue on setuptools's side.\n\n### Expected behavior\n\nInstalling the package works.\n\n### How to Reproduce\n\n```console\r\n$ python3.8 -m venv .venv\r\n$ source .venv/bin/activate\r\n$ pip install setuptools==65.6.2\r\n$ pip install wheel\r\n$ pip install kappa==0.6.0\r\n```\n\n### Output\n\n```console\r\n(.venv) dmytro@Dmytros-MacBook-Pro install_kappa % pip install kappa==0.6.0\r\nCollecting kappa==0.6.0\r\n Using cached kappa-0.6.0.tar.gz (29 kB)\r\n Preparing metadata (setup.py) ... error\r\n error: subprocess-exited-with-error\r\n\r\n \u00d7 python setup.py egg_info did not run successfully.\r\n \u2502 exit code: 1\r\n \u2570\u2500> [20 lines of output]\r\n Traceback (most recent call last):\r\n File \"<string>\", line 2, in <module>\r\n File \"<pip-setuptools-caller>\", line 34, in <module>\r\n File \"/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py\", line 54, in <module>\r\n run_setup()\r\n File \"/private/var/folders/kv/zxwjm57d3jdgwyylrg2db6r80000gn/T/pip-install-xjv7l4bl/kappa_6f4dfc99aa59445e99fd8697c5e00cd7/setup.py\", line 18, in run_setup\r\n setup(\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/__init__.py\", line 87, in setup\r\n return distutils.core.setup(**attrs)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/core.py\", line 147, in setup\r\n _setup_distribution = dist = klass(attrs)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/dist.py\", line 475, in __init__\r\n _Distribution.__init__(\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 258, in __init__\r\n getattr(self.metadata, \"set_\" + key)(val)\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 1242, in set_classifiers\r\n self.classifiers = _ensure_list(value, 'classifiers')\r\n File \"/Users/dmytro/Tests/install_kappa/.venv/lib/python3.8/site-packages/setuptools/_distutils/dist.py\", line 48, in _ensure_list\r\n log.warning(msg)\r\n AttributeError: module 'distutils.log' has no attribute 'warning'\r\n [end of output]\r\n\r\n note: This error originates from a subprocess, and is likely not a problem with pip.\r\nerror: metadata-generation-failed\r\n\r\n\u00d7 Encountered error while generating package metadata.\r\n\u2570\u2500> See above for output.\r\n\r\nnote: 
This is an issue with the package mentioned above, not pip.\r\nhint: See above for details.\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport logging\nimport distutils.log\nfrom . import monkey\n\n\ndef _not_warning(record):\n return record.levelno < logging.WARNING\n\n\ndef configure():\n \"\"\"\n Configure logging to emit warning and above to stderr\n and everything else to stdout. This behavior is provided\n for compatibility with distutils.log but may change in\n the future.\n \"\"\"\n err_handler = logging.StreamHandler()\n err_handler.setLevel(logging.WARNING)\n out_handler = logging.StreamHandler(sys.stdout)\n out_handler.addFilter(_not_warning)\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n if hasattr(distutils.log, 'Log'):\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n # For some reason `distutils.log` module is getting cached in `distutils.dist`\n # and then loaded again when patched,\n # implying: id(distutils.log) != id(distutils.dist.log).\n # Make sure the same module object is used everywhere:\n distutils.dist.log = distutils.log\n\n\ndef set_threshold(level):\n logging.root.setLevel(level*10)\n return set_threshold.unpatched(level)\n", "path": "setuptools/logging.py"}], "after_files": [{"content": "import sys\nimport inspect\nimport logging\nimport distutils.log\nfrom . import monkey\n\n\ndef _not_warning(record):\n return record.levelno < logging.WARNING\n\n\ndef configure():\n \"\"\"\n Configure logging to emit warning and above to stderr\n and everything else to stdout. This behavior is provided\n for compatibility with distutils.log but may change in\n the future.\n \"\"\"\n err_handler = logging.StreamHandler()\n err_handler.setLevel(logging.WARNING)\n out_handler = logging.StreamHandler(sys.stdout)\n out_handler.addFilter(_not_warning)\n handlers = err_handler, out_handler\n logging.basicConfig(\n format=\"{message}\", style='{', handlers=handlers, level=logging.DEBUG)\n if inspect.ismodule(distutils.dist.log):\n monkey.patch_func(set_threshold, distutils.log, 'set_threshold')\n # For some reason `distutils.log` module is getting cached in `distutils.dist`\n # and then loaded again when patched,\n # implying: id(distutils.log) != id(distutils.dist.log).\n # Make sure the same module object is used everywhere:\n distutils.dist.log = distutils.log\n\n\ndef set_threshold(level):\n logging.root.setLevel(level*10)\n return set_threshold.unpatched(level)\n", "path": "setuptools/logging.py"}]} | 1,468 | 165 |
gh_patches_debug_6045 | rasdani/github-patches | git_diff | ManimCommunity__manim-509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueTracker should also support incrementing with `+=`
Just what the title says. Here's an example:
```python
class Test(Scene):
def construct(self):
tracker = ValueTracker(0.0)
tracker.set_value(2.0)
print(tracker.get_value()) # -> 2.000
tracker.increment_value(3.0)
print(tracker.get_value()) # -> 5.000
tracker += 4.0 # NEW proposed feature
print(tracker.get_value()) # -> 9.000
```
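A minimal, self-contained sketch of how the proposed `+=` support can be wired up, by delegating `__iadd__` to the existing `increment_value()` and returning `self`, is shown below. It mirrors the patch at the end of this entry but uses a stand-in class rather than the real `ValueTracker`.

```python
# Stand-in sketch, not the real manim ValueTracker: the point is that
# __iadd__ must both update the stored value and return self so that
# `tracker += x` keeps referring to the same tracker object.
class TrackerSketch:
    def __init__(self, value=0.0):
        self._value = value

    def get_value(self):
        return self._value

    def increment_value(self, d_value):
        self._value += d_value

    def __iadd__(self, d_value):
        self.increment_value(d_value)
        return self


tracker = TrackerSketch(5.0)
tracker += 4.0
print(tracker.get_value())  # -> 9.0
```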
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/mobject/value_tracker.py`
Content:
```
1 """Mobjects that dynamically show the change of a variable."""
2
3 __all__ = ["ValueTracker", "ExponentialValueTracker", "ComplexValueTracker"]
4
5
6 import numpy as np
7
8 from ..utils.paths import straight_path
9 from ..mobject.mobject import Mobject
10
11
12 class ValueTracker(Mobject):
13 """A mobject that can be used for tracking (real-valued) parameters.
14 Useful for animating parameter changes.
15
16 Not meant to be displayed. Instead the position encodes some
17 number, often one which another animation or continual_animation
18 uses for its update function, and by treating it as a mobject it can
19 still be animated and manipulated just like anything else.
20
21 Examples
22 --------
23 .. manim:: ValueTrackerExample
24
25 class ValueTrackerExample(Scene):
26 def construct(self):
27 number_line = NumberLine()
28 pointer = Vector(DOWN)
29 label = MathTex("x").add_updater(lambda m: m.next_to(pointer, UP))
30
31 pointer_value = ValueTracker(0)
32 pointer.add_updater(
33 lambda m: m.next_to(
34 number_line.n2p(pointer_value.get_value()),
35 UP
36 )
37 )
38 self.add(number_line, pointer,label)
39 self.play(pointer_value.set_value, 5)
40 self.wait()
41 self.play(pointer_value.set_value, 3)
42
43 """
44
45 def __init__(self, value=0, **kwargs):
46 Mobject.__init__(self, **kwargs)
47 self.points = np.zeros((1, 3))
48 self.set_value(value)
49
50 def get_value(self):
51 return self.points[0, 0]
52
53 def set_value(self, value):
54 self.points[0, 0] = value
55 return self
56
57 def increment_value(self, d_value):
58 self.set_value(self.get_value() + d_value)
59
60 def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):
61 """
62 Turns self into an interpolation between mobject1
63 and mobject2.
64 """
65 self.points = path_func(mobject1.points, mobject2.points, alpha)
66 return self
67
68
69 class ExponentialValueTracker(ValueTracker):
70 """
71 Operates just like ValueTracker, except it encodes the value as the
72 exponential of a position coordinate, which changes how interpolation
73 behaves
74 """
75
76 def get_value(self):
77 return np.exp(ValueTracker.get_value(self))
78
79 def set_value(self, value):
80 return ValueTracker.set_value(self, np.log(value))
81
82
83 class ComplexValueTracker(ValueTracker):
84 def get_value(self):
85 return complex(*self.points[0, :2])
86
87 def set_value(self, z):
88 z = complex(z)
89 self.points[0, :2] = (z.real, z.imag)
90 return self
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/mobject/value_tracker.py b/manim/mobject/value_tracker.py
--- a/manim/mobject/value_tracker.py
+++ b/manim/mobject/value_tracker.py
@@ -57,6 +57,10 @@
def increment_value(self, d_value):
self.set_value(self.get_value() + d_value)
+ def __iadd__(self, d_value):
+ self.increment_value(d_value)
+ return self
+
def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):
"""
Turns self into an interpolation between mobject1
| {"golden_diff": "diff --git a/manim/mobject/value_tracker.py b/manim/mobject/value_tracker.py\n--- a/manim/mobject/value_tracker.py\n+++ b/manim/mobject/value_tracker.py\n@@ -57,6 +57,10 @@\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n \n+ def __iadd__(self, d_value):\n+ self.increment_value(d_value)\n+ return self\n+\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n \"\"\"\n Turns self into an interpolation between mobject1\n", "issue": "ValueTracker must support increment also using +=\nJust what the title says. Here's an example \r\n```python\r\nclass Test(Scene):\r\n def construct(self):\r\n tracker = ValueTracker(0.0)\r\n tracker.set_value(2.0)\r\n print(tracker.get_value()) # -> 2.000\r\n tracker.increment_value(3.0)\r\n print(tracker.get_value()) # -> 5.000\r\n tracker += 4.0 # NEW proposed feature\r\n print(tracker.get_value()) # -> 9.000\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Mobjects that dynamically show the change of a variable.\"\"\"\n\n__all__ = [\"ValueTracker\", \"ExponentialValueTracker\", \"ComplexValueTracker\"]\n\n\nimport numpy as np\n\nfrom ..utils.paths import straight_path\nfrom ..mobject.mobject import Mobject\n\n\nclass ValueTracker(Mobject):\n \"\"\"A mobject that can be used for tracking (real-valued) parameters.\n Useful for animating parameter changes.\n\n Not meant to be displayed. Instead the position encodes some\n number, often one which another animation or continual_animation\n uses for its update function, and by treating it as a mobject it can\n still be animated and manipulated just like anything else.\n\n Examples\n --------\n .. manim:: ValueTrackerExample\n\n class ValueTrackerExample(Scene):\n def construct(self):\n number_line = NumberLine()\n pointer = Vector(DOWN)\n label = MathTex(\"x\").add_updater(lambda m: m.next_to(pointer, UP))\n\n pointer_value = ValueTracker(0)\n pointer.add_updater(\n lambda m: m.next_to(\n number_line.n2p(pointer_value.get_value()),\n UP\n )\n )\n self.add(number_line, pointer,label)\n self.play(pointer_value.set_value, 5)\n self.wait()\n self.play(pointer_value.set_value, 3)\n\n \"\"\"\n\n def __init__(self, value=0, **kwargs):\n Mobject.__init__(self, **kwargs)\n self.points = np.zeros((1, 3))\n self.set_value(value)\n\n def get_value(self):\n return self.points[0, 0]\n\n def set_value(self, value):\n self.points[0, 0] = value\n return self\n\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n \"\"\"\n Turns self into an interpolation between mobject1\n and mobject2.\n \"\"\"\n self.points = path_func(mobject1.points, mobject2.points, alpha)\n return self\n\n\nclass ExponentialValueTracker(ValueTracker):\n \"\"\"\n Operates just like ValueTracker, except it encodes the value as the\n exponential of a position coordinate, which changes how interpolation\n behaves\n \"\"\"\n\n def get_value(self):\n return np.exp(ValueTracker.get_value(self))\n\n def set_value(self, value):\n return ValueTracker.set_value(self, np.log(value))\n\n\nclass ComplexValueTracker(ValueTracker):\n def get_value(self):\n return complex(*self.points[0, :2])\n\n def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self\n", "path": "manim/mobject/value_tracker.py"}], "after_files": [{"content": "\"\"\"Mobjects that dynamically show the change of a variable.\"\"\"\n\n__all__ = [\"ValueTracker\", 
\"ExponentialValueTracker\", \"ComplexValueTracker\"]\n\n\nimport numpy as np\n\nfrom ..utils.paths import straight_path\nfrom ..mobject.mobject import Mobject\n\n\nclass ValueTracker(Mobject):\n \"\"\"A mobject that can be used for tracking (real-valued) parameters.\n Useful for animating parameter changes.\n\n Not meant to be displayed. Instead the position encodes some\n number, often one which another animation or continual_animation\n uses for its update function, and by treating it as a mobject it can\n still be animated and manipulated just like anything else.\n\n Examples\n --------\n .. manim:: ValueTrackerExample\n\n class ValueTrackerExample(Scene):\n def construct(self):\n number_line = NumberLine()\n pointer = Vector(DOWN)\n label = MathTex(\"x\").add_updater(lambda m: m.next_to(pointer, UP))\n\n pointer_value = ValueTracker(0)\n pointer.add_updater(\n lambda m: m.next_to(\n number_line.n2p(pointer_value.get_value()),\n UP\n )\n )\n self.add(number_line, pointer,label)\n self.play(pointer_value.set_value, 5)\n self.wait()\n self.play(pointer_value.set_value, 3)\n\n \"\"\"\n\n def __init__(self, value=0, **kwargs):\n Mobject.__init__(self, **kwargs)\n self.points = np.zeros((1, 3))\n self.set_value(value)\n\n def get_value(self):\n return self.points[0, 0]\n\n def set_value(self, value):\n self.points[0, 0] = value\n return self\n\n def increment_value(self, d_value):\n self.set_value(self.get_value() + d_value)\n\n def __iadd__(self, d_value):\n self.increment_value(d_value)\n return self\n\n def interpolate(self, mobject1, mobject2, alpha, path_func=straight_path):\n \"\"\"\n Turns self into an interpolation between mobject1\n and mobject2.\n \"\"\"\n self.points = path_func(mobject1.points, mobject2.points, alpha)\n return self\n\n\nclass ExponentialValueTracker(ValueTracker):\n \"\"\"\n Operates just like ValueTracker, except it encodes the value as the\n exponential of a position coordinate, which changes how interpolation\n behaves\n \"\"\"\n\n def get_value(self):\n return np.exp(ValueTracker.get_value(self))\n\n def set_value(self, value):\n return ValueTracker.set_value(self, np.log(value))\n\n\nclass ComplexValueTracker(ValueTracker):\n def get_value(self):\n return complex(*self.points[0, :2])\n\n def set_value(self, z):\n z = complex(z)\n self.points[0, :2] = (z.real, z.imag)\n return self\n", "path": "manim/mobject/value_tracker.py"}]} | 1,168 | 135 |
gh_patches_debug_21579 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1103 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Searching for UUIDs from the command line
### Expected behaviour
I would like to be able to search for UUIDs from the command line, e.g.:
datacube dataset search id = 26931d17-7a4e-4b55-98e7-d6777fb61df6
### Actual behaviour
If I try to run this command, `datacube` crashes with the following Exception:
```
File "/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/click.py", line 356, in my_parse
return parse_expressions(*list(value))
File "/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/expression.py", line 112, in parse_expressions
tree = expr_parser.parse(' '.join(expression_text))
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/lark.py", line 391, in parse
return self.parser.parse(text, start=start)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py", line 199, in parse
return self._parse(text, start)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py", line 53, in _parse
return self.parser.parse(input, start, *args)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/earley.py", line 293, in parse
to_scan = self._parse(stream, columns, to_scan, start_symbol)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py", line 138, in _parse
to_scan = scan(i, to_scan)
File "/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py", line 115, in scan
raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan))
lark.exceptions.UnexpectedCharacters: No terminal defined for '-' at line 1 col 14
id = 26931d17-7a4e-4b55-98e7-d6777fb61df6
^
Expecting: {'EQUAL', 'IN'}
```
### Cause
This happens because the search expression parser is treating the UUID as a number instead of a string. It's possible to work around this by surrounding the UUID in quotes. e.g.:
datacube dataset search id = \"26931d17-7a4e-4b55-98e7-d6777fb61df6\"
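The same workaround expressed through the Python API (this assumes a working datacube install and uses `parse_expressions` from `datacube/ui/expression.py`, listed below): the quotes make the value match the `ESCAPED_STRING` rule, so it comes back as a plain string.

```python
# Assumes a datacube installation; illustrates why quoting works today.
from datacube.ui.expression import parse_expressions

# Quoted, the UUID matches the ESCAPED_STRING terminal and is returned verbatim.
print(parse_expressions('id = "26931d17-7a4e-4b55-98e7-d6777fb61df6"'))
# {'id': '26931d17-7a4e-4b55-98e7-d6777fb61df6'}
```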
### Intended changes
It should be possible to improve the parser to recognise UUIDs, to avoid having to manually quote them from the shell. The search expression 'language' was intended to be shell escaping friendly, so this seems like an obvious improvement.
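As a rough illustration of what such an improvement could look like, here is a small standalone Lark grammar with a dedicated UUID terminal; the exact terminal shape is an assumption made for this sketch, not necessarily the grammar change that ships.

```python
# Standalone sketch (assumed terminal shape, not the shipped grammar):
# a UUID terminal lets bare UUIDs parse without shell quoting.
from lark import Lark

uuid_grammar = r"""
    start: FIELD "=" value
    ?value: UUID -> uuid
          | SIGNED_NUMBER -> number

    FIELD: /[a-zA-Z][\w\d_]*/
    UUID: HEXDIGIT~8 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~12

    %import common.SIGNED_NUMBER
    %import common.HEXDIGIT
    %import common.WS
    %ignore WS
"""

parser = Lark(uuid_grammar)
print(parser.parse("id = 26931d17-7a4e-4b55-98e7-d6777fb61df6").pretty())
```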
### Environment information
* Which ``datacube --version`` are you using?
`Open Data Cube core, version 1.8.3`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/ui/expression.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """
6 Search expression parsing for command line applications.
7
8 Three types of expressions are available:
9
10 FIELD = VALUE
11 FIELD in DATE-RANGE
12 FIELD in [START, END]
13
14 Where DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD
15 and START, END are either numbers or dates.
16 """
17 # flake8: noqa
18
19 from lark import Lark, v_args, Transformer
20
21 from datacube.api.query import _time_to_search_dims
22 from datacube.model import Range
23
24
25 search_grammar = r"""
26 start: expression*
27 ?expression: equals_expr
28 | time_in_expr
29 | field_in_expr
30
31 equals_expr: field "=" value
32 time_in_expr: time "in" date_range
33 field_in_expr: field "in" "[" orderable "," orderable "]"
34
35 field: FIELD
36 time: TIME
37
38 ?orderable: INT -> integer
39 | SIGNED_NUMBER -> number
40
41 ?value: INT -> integer
42 | SIGNED_NUMBER -> number
43 | ESCAPED_STRING -> string
44 | SIMPLE_STRING -> simple_string
45 | URL_STRING -> url_string
46
47
48 ?date_range: date -> single_date
49 | "[" date "," date "]" -> date_pair
50
51 date: YEAR ["-" MONTH ["-" DAY ]]
52
53 TIME: "time"
54 FIELD: /[a-zA-Z][\w\d_]*/
55 YEAR: DIGIT ~ 4
56 MONTH: DIGIT ~ 1..2
57 DAY: DIGIT ~ 1..2
58 SIMPLE_STRING: /[a-zA-Z][\w._-]*/
59 URL_STRING: /[a-z0-9+.-]+:\/\/([:\/\w._-])*/
60
61
62 %import common.ESCAPED_STRING
63 %import common.SIGNED_NUMBER
64 %import common.INT
65 %import common.DIGIT
66 %import common.CNAME
67 %import common.WS
68 %ignore WS
69 """
70
71
72 def identity(x):
73 return x
74
75
76 @v_args(inline=True)
77 class TreeToSearchExprs(Transformer):
78 # Convert the expressions
79 def equals_expr(self, field, value):
80 return {str(field): value}
81
82 def field_in_expr(self, field, lower, upper):
83 return {str(field): Range(lower, upper)}
84
85 def time_in_expr(self, time_field, date_range):
86 return {str(time_field): date_range}
87
88 # Convert the literals
89 def string(self, val):
90 return str(val[1:-1])
91
92 simple_string = url_string = field = time = str
93 number = float
94 integer = int
95 value = identity
96
97 def single_date(self, date):
98 return _time_to_search_dims(date)
99
100 def date_pair(self, start, end):
101 return _time_to_search_dims((start, end))
102
103 def date(self, y, m=None, d=None):
104 return "-".join(x for x in [y, m, d] if x is not None)
105
106 # Merge everything into a single dict
107 def start(self, *search_exprs):
108 combined = {}
109 for expr in search_exprs:
110 combined.update(expr)
111 return combined
112
113
114 def parse_expressions(*expression_text):
115 expr_parser = Lark(search_grammar)
116 tree = expr_parser.parse(' '.join(expression_text))
117 return TreeToSearchExprs().transform(tree)
118
119
120 def main():
121 expr_parser = Lark(search_grammar)
122
123 sample_inputs = """platform = "LANDSAT_8"
124 platform = "LAND SAT_8"
125 platform = 4
126 lat in [4, 6]
127 time in [2014, 2014]
128 time in [2014-03-01, 2014-04-01]
129 time in 2014-03-02
130 time in 2014-3-2
131 time in 2014-3
132 time in 2014
133 platform = LANDSAT_8
134 lat in [4, 6] time in 2014-03-02
135 platform=LS8 lat in [-14, -23.5] instrument="OTHER"
136 """.strip().split('\n')
137
138 for sample in sample_inputs:
139 transformer = TreeToSearchExprs()
140 tree = expr_parser.parse(sample)
141
142 print(sample)
143 print(tree)
144 print(transformer.transform(tree))
145 print()
146
147
148 if __name__ == '__main__':
149 main()
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/ui/expression.py b/datacube/ui/expression.py
--- a/datacube/ui/expression.py
+++ b/datacube/ui/expression.py
@@ -43,7 +43,7 @@
| ESCAPED_STRING -> string
| SIMPLE_STRING -> simple_string
| URL_STRING -> url_string
-
+ | UUID -> simple_string
?date_range: date -> single_date
| "[" date "," date "]" -> date_pair
@@ -55,14 +55,16 @@
YEAR: DIGIT ~ 4
MONTH: DIGIT ~ 1..2
DAY: DIGIT ~ 1..2
- SIMPLE_STRING: /[a-zA-Z][\w._-]*/
+ SIMPLE_STRING: /[a-zA-Z][\w._-]*/ | /[0-9]+[\w_-][\w._-]*/
URL_STRING: /[a-z0-9+.-]+:\/\/([:\/\w._-])*/
+ UUID: HEXDIGIT~8 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~4 "-" HEXDIGIT~12
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.INT
%import common.DIGIT
+ %import common.HEXDIGIT
%import common.CNAME
%import common.WS
%ignore WS
| {"golden_diff": "diff --git a/datacube/ui/expression.py b/datacube/ui/expression.py\n--- a/datacube/ui/expression.py\n+++ b/datacube/ui/expression.py\n@@ -43,7 +43,7 @@\n | ESCAPED_STRING -> string\n | SIMPLE_STRING -> simple_string\n | URL_STRING -> url_string\n-\n+ | UUID -> simple_string\n \n ?date_range: date -> single_date\n | \"[\" date \",\" date \"]\" -> date_pair\n@@ -55,14 +55,16 @@\n YEAR: DIGIT ~ 4\n MONTH: DIGIT ~ 1..2\n DAY: DIGIT ~ 1..2\n- SIMPLE_STRING: /[a-zA-Z][\\w._-]*/\n+ SIMPLE_STRING: /[a-zA-Z][\\w._-]*/ | /[0-9]+[\\w_-][\\w._-]*/\n URL_STRING: /[a-z0-9+.-]+:\\/\\/([:\\/\\w._-])*/\n+ UUID: HEXDIGIT~8 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~12\n \n \n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.INT\n %import common.DIGIT\n+ %import common.HEXDIGIT\n %import common.CNAME\n %import common.WS\n %ignore WS\n", "issue": "Searching for UUIDs from the command line\n### Expected behaviour\r\nI would like to be able to search for UUIDs from the command line, e.g.:\r\n\r\n datacube dataset search id = 26931d17-7a4e-4b55-98e7-d6777fb61df6\r\n\r\n### Actual behaviour\r\nIf I try to run this command, `datacube` crashes with the following Exception:\r\n\r\n```\r\n File \"/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/click.py\", line 356, in my_parse\r\n return parse_expressions(*list(value))\r\n File \"/g/data/v10/public/modules/dea/20200828/lib/python3.6/site-packages/datacube/ui/expression.py\", line 112, in parse_expressions\r\n tree = expr_parser.parse(' '.join(expression_text))\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/lark.py\", line 391, in parse\r\n return self.parser.parse(text, start=start)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py\", line 199, in parse\r\n return self._parse(text, start)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parser_frontends.py\", line 53, in _parse\r\n return self.parser.parse(input, start, *args)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/earley.py\", line 293, in parse\r\n to_scan = self._parse(stream, columns, to_scan, start_symbol)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py\", line 138, in _parse\r\n to_scan = scan(i, to_scan)\r\n File \"/g/data/v10/public/modules/dea-env/20200713/lib/python3.6/site-packages/lark/parsers/xearley.py\", line 115, in scan\r\n raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, set(to_scan))\r\nlark.exceptions.UnexpectedCharacters: No terminal defined for '-' at line 1 col 14\r\n\r\nid = 26931d17-7a4e-4b55-98e7-d6777fb61df6\r\n ^\r\n\r\nExpecting: {'EQUAL', 'IN'}\r\n```\r\n\r\n### Cause\r\nThis happens because the search expression parser is treating the UUID as a number instead of a string. It's possible to work around this by surrounding the UUID in quotes. e.g.:\r\n\r\n datacube dataset search id = \\\"26931d17-7a4e-4b55-98e7-d6777fb61df6\\\"\r\n\r\n\r\n### Intended changes\r\n\r\nIt should be possible to improve the parser to recognise UUIDs, to avoid having to manually quote them from the shell. The search expression 'language' was intended to be shell escaping friendly, so this seems like an obvious improvement. 
\r\n\r\n### Environment information\r\n\r\n* Which ``datacube --version`` are you using?\r\n`Open Data Cube core, version 1.8.3`\r\n\r\n\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nSearch expression parsing for command line applications.\n\nThree types of expressions are available:\n\n FIELD = VALUE\n FIELD in DATE-RANGE\n FIELD in [START, END]\n\nWhere DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD\nand START, END are either numbers or dates.\n\"\"\"\n# flake8: noqa\n\nfrom lark import Lark, v_args, Transformer\n\nfrom datacube.api.query import _time_to_search_dims\nfrom datacube.model import Range\n\n\nsearch_grammar = r\"\"\"\n start: expression*\n ?expression: equals_expr\n | time_in_expr\n | field_in_expr\n\n equals_expr: field \"=\" value\n time_in_expr: time \"in\" date_range\n field_in_expr: field \"in\" \"[\" orderable \",\" orderable \"]\"\n\n field: FIELD\n time: TIME\n\n ?orderable: INT -> integer\n | SIGNED_NUMBER -> number\n\n ?value: INT -> integer\n | SIGNED_NUMBER -> number\n | ESCAPED_STRING -> string\n | SIMPLE_STRING -> simple_string\n | URL_STRING -> url_string\n\n\n ?date_range: date -> single_date\n | \"[\" date \",\" date \"]\" -> date_pair\n\n date: YEAR [\"-\" MONTH [\"-\" DAY ]]\n\n TIME: \"time\"\n FIELD: /[a-zA-Z][\\w\\d_]*/\n YEAR: DIGIT ~ 4\n MONTH: DIGIT ~ 1..2\n DAY: DIGIT ~ 1..2\n SIMPLE_STRING: /[a-zA-Z][\\w._-]*/\n URL_STRING: /[a-z0-9+.-]+:\\/\\/([:\\/\\w._-])*/\n\n\n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.INT\n %import common.DIGIT\n %import common.CNAME\n %import common.WS\n %ignore WS\n\"\"\"\n\n\ndef identity(x):\n return x\n\n\n@v_args(inline=True)\nclass TreeToSearchExprs(Transformer):\n # Convert the expressions\n def equals_expr(self, field, value):\n return {str(field): value}\n\n def field_in_expr(self, field, lower, upper):\n return {str(field): Range(lower, upper)}\n\n def time_in_expr(self, time_field, date_range):\n return {str(time_field): date_range}\n\n # Convert the literals\n def string(self, val):\n return str(val[1:-1])\n\n simple_string = url_string = field = time = str\n number = float\n integer = int\n value = identity\n\n def single_date(self, date):\n return _time_to_search_dims(date)\n\n def date_pair(self, start, end):\n return _time_to_search_dims((start, end))\n\n def date(self, y, m=None, d=None):\n return \"-\".join(x for x in [y, m, d] if x is not None)\n\n # Merge everything into a single dict\n def start(self, *search_exprs):\n combined = {}\n for expr in search_exprs:\n combined.update(expr)\n return combined\n\n\ndef parse_expressions(*expression_text):\n expr_parser = Lark(search_grammar)\n tree = expr_parser.parse(' '.join(expression_text))\n return TreeToSearchExprs().transform(tree)\n\n\ndef main():\n expr_parser = Lark(search_grammar)\n\n sample_inputs = \"\"\"platform = \"LANDSAT_8\"\n platform = \"LAND SAT_8\"\n platform = 4\n lat in [4, 6]\n time in [2014, 2014]\n time in [2014-03-01, 2014-04-01]\n time in 2014-03-02\n time in 2014-3-2\n time in 2014-3\n time in 2014\n platform = LANDSAT_8\n lat in [4, 6] time in 2014-03-02\n platform=LS8 lat in [-14, -23.5] instrument=\"OTHER\"\n \"\"\".strip().split('\\n')\n\n for sample in sample_inputs:\n transformer = TreeToSearchExprs()\n tree = expr_parser.parse(sample)\n\n print(sample)\n print(tree)\n print(transformer.transform(tree))\n 
print()\n\n\nif __name__ == '__main__':\n main()\n", "path": "datacube/ui/expression.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"\nSearch expression parsing for command line applications.\n\nThree types of expressions are available:\n\n FIELD = VALUE\n FIELD in DATE-RANGE\n FIELD in [START, END]\n\nWhere DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD\nand START, END are either numbers or dates.\n\"\"\"\n# flake8: noqa\n\nfrom lark import Lark, v_args, Transformer\n\nfrom datacube.api.query import _time_to_search_dims\nfrom datacube.model import Range\n\n\nsearch_grammar = r\"\"\"\n start: expression*\n ?expression: equals_expr\n | time_in_expr\n | field_in_expr\n\n equals_expr: field \"=\" value\n time_in_expr: time \"in\" date_range\n field_in_expr: field \"in\" \"[\" orderable \",\" orderable \"]\"\n\n field: FIELD\n time: TIME\n\n ?orderable: INT -> integer\n | SIGNED_NUMBER -> number\n\n ?value: INT -> integer\n | SIGNED_NUMBER -> number\n | ESCAPED_STRING -> string\n | SIMPLE_STRING -> simple_string\n | URL_STRING -> url_string\n | UUID -> simple_string\n\n ?date_range: date -> single_date\n | \"[\" date \",\" date \"]\" -> date_pair\n\n date: YEAR [\"-\" MONTH [\"-\" DAY ]]\n\n TIME: \"time\"\n FIELD: /[a-zA-Z][\\w\\d_]*/\n YEAR: DIGIT ~ 4\n MONTH: DIGIT ~ 1..2\n DAY: DIGIT ~ 1..2\n SIMPLE_STRING: /[a-zA-Z][\\w._-]*/ | /[0-9]+[\\w_-][\\w._-]*/\n URL_STRING: /[a-z0-9+.-]+:\\/\\/([:\\/\\w._-])*/\n UUID: HEXDIGIT~8 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~4 \"-\" HEXDIGIT~12\n\n\n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.INT\n %import common.DIGIT\n %import common.HEXDIGIT\n %import common.CNAME\n %import common.WS\n %ignore WS\n\"\"\"\n\n\ndef identity(x):\n return x\n\n\n@v_args(inline=True)\nclass TreeToSearchExprs(Transformer):\n # Convert the expressions\n def equals_expr(self, field, value):\n return {str(field): value}\n\n def field_in_expr(self, field, lower, upper):\n return {str(field): Range(lower, upper)}\n\n def time_in_expr(self, time_field, date_range):\n return {str(time_field): date_range}\n\n # Convert the literals\n def string(self, val):\n return str(val[1:-1])\n\n simple_string = url_string = field = time = str\n number = float\n integer = int\n value = identity\n\n def single_date(self, date):\n return _time_to_search_dims(date)\n\n def date_pair(self, start, end):\n return _time_to_search_dims((start, end))\n\n def date(self, y, m=None, d=None):\n return \"-\".join(x for x in [y, m, d] if x is not None)\n\n # Merge everything into a single dict\n def start(self, *search_exprs):\n combined = {}\n for expr in search_exprs:\n combined.update(expr)\n return combined\n\n\ndef parse_expressions(*expression_text):\n expr_parser = Lark(search_grammar)\n tree = expr_parser.parse(' '.join(expression_text))\n return TreeToSearchExprs().transform(tree)\n\n\ndef main():\n expr_parser = Lark(search_grammar)\n\n sample_inputs = \"\"\"platform = \"LANDSAT_8\"\n platform = \"LAND SAT_8\"\n platform = 4\n lat in [4, 6]\n time in [2014, 2014]\n time in [2014-03-01, 2014-04-01]\n time in 2014-03-02\n time in 2014-3-2\n time in 2014-3\n time in 2014\n platform = LANDSAT_8\n lat in [4, 6] time in 2014-03-02\n platform=LS8 lat in [-14, -23.5] instrument=\"OTHER\"\n \"\"\".strip().split('\\n')\n\n for sample in sample_inputs:\n transformer = TreeToSearchExprs()\n 
tree = expr_parser.parse(sample)\n\n print(sample)\n print(tree)\n print(transformer.transform(tree))\n print()\n\n\nif __name__ == '__main__':\n main()\n", "path": "datacube/ui/expression.py"}]} | 2,468 | 316 |
gh_patches_debug_3649 | rasdani/github-patches | git_diff | python-poetry__poetry-604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Building sdist is slow with wide exclude pattern
<!--
Hi there! Thank you for wanting to make Poetry better.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
## Issue
I'm working on a project with the following `pyproject.toml`: https://gist.github.com/Lothiraldan/2c0b5ce0171e8450490e3b493e7c2960 and I want to ship a React project inside my package.
Include and exclude are working great, far easier to configure than `MANIFEST.IN` IMHO.
My issue is that the builder code walks all of the directories and, for each file, checks that it's not in the excluded list. One of my exclude patterns is `"balto/web_interfaces/balto_react/node_modules/**/*"`, which generates a lot of matching files. The length of the excluded file list is `28761` in my case.
This makes the following line https://github.com/sdispater/poetry/blob/master/poetry/masonry/builders/sdist.py#L281 quite slow. A build takes about 4 minutes on my laptop.
Here is a `py-spy` dump of the process:
```
Collecting samples from 'pid: 31302' (python v3.6.6)
Total Samples 5100
GIL: 0.00%, Active: 95.50%, Threads: 1
%Own %Total OwnTime TotalTime Function (filename:line)
38.00% 51.00% 10.55s 14.14s __eq__ (4/python3.6/pathlib.py:736)
29.00% 93.00% 6.64s 24.87s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:281)
16.50% 16.50% 4.38s 4.38s _cparts (4/python3.6/pathlib.py:728)
7.50% 11.00% 2.81s 3.65s __eq__ (4/python3.6/pathlib.py:734)
1.50% 1.50% 0.015s 0.015s run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:104)
1.00% 1.00% 0.130s 0.130s _cparts (4/python3.6/pathlib.py:724)
1.00% 2.00% 0.315s 0.435s __eq__ (4/python3.6/pathlib.py:733)
0.50% 0.50% 0.025s 0.035s parse_parts (4/python3.6/pathlib.py:87)
0.50% 0.50% 0.165s 0.180s wrapped (4/python3.6/pathlib.py:387)
0.00% 1.00% 0.000s 0.355s find_packages (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:277)
0.00% 0.00% 0.030s 0.030s _get_sep (4/python3.6/posixpath.py:45)
0.00% 94.00% 0.000s 25.36s execute (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/commands/command.py:107)
0.00% 0.00% 0.010s 0.075s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:276)
0.00% 0.00% 0.025s 0.025s _select_from (4/python3.6/pathlib.py:529)
0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:197)
0.00% 94.00% 0.000s 25.36s build (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builder.py:21)
0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/console/application.py:88)
Press Control-C to quit, or ? for help.
```
I have some ideas about how to make it faster; I will send some patches if that's okay.
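The hot frames above are `Path.__eq__` and `_cparts`, driven by the list membership test, so the cheapest improvement is to make `excluded` a hash-based container, which is what the patch at the end of this entry does by returning a set. A tiny illustration of the difference, using invented paths:

```python
# Illustration with invented paths: membership in a set of PurePath objects
# is a hash lookup, while membership in a list re-runs Path.__eq__ per element.
from pathlib import PurePath

paths = [PurePath(f"balto/web_interfaces/balto_react/node_modules/pkg{i}/index.js")
         for i in range(28_761)]
excluded_list = paths
excluded_set = set(paths)

needle = PurePath("balto/web_interfaces/balto_react/node_modules/pkg20000/index.js")
print(needle in excluded_list)  # True, but scans ~20k entries first
print(needle in excluded_set)   # True, via a single hash lookup
```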
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/masonry/builders/builder.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os
3 import re
4 import shutil
5 import tempfile
6
7 from collections import defaultdict
8 from contextlib import contextmanager
9
10 from poetry.utils._compat import Path
11 from poetry.vcs import get_vcs
12
13 from ..metadata import Metadata
14 from ..utils.module import Module
15 from ..utils.package_include import PackageInclude
16
17
18 AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[- .,\w\d'’\"()]+) <(?P<email>.+?)>$")
19
20
21 class Builder(object):
22
23 AVAILABLE_PYTHONS = {"2", "2.7", "3", "3.4", "3.5", "3.6", "3.7"}
24
25 def __init__(self, poetry, env, io):
26 self._poetry = poetry
27 self._env = env
28 self._io = io
29 self._package = poetry.package
30 self._path = poetry.file.parent
31 self._module = Module(
32 self._package.name,
33 self._path.as_posix(),
34 packages=self._package.packages,
35 includes=self._package.include,
36 )
37 self._meta = Metadata.from_package(self._package)
38
39 def build(self):
40 raise NotImplementedError()
41
42 def find_excluded_files(self): # type: () -> list
43 # Checking VCS
44 vcs = get_vcs(self._path)
45 if not vcs:
46 vcs_ignored_files = []
47 else:
48 vcs_ignored_files = vcs.get_ignored_files()
49
50 explicitely_excluded = []
51 for excluded_glob in self._package.exclude:
52 for excluded in self._path.glob(excluded_glob):
53 explicitely_excluded.append(excluded)
54
55 ignored = vcs_ignored_files + explicitely_excluded
56 result = []
57 for file in ignored:
58 try:
59 file = Path(file).absolute().relative_to(self._path)
60 except ValueError:
61 # Should only happen in tests
62 continue
63
64 result.append(file)
65
66 return result
67
68 def find_files_to_add(self, exclude_build=True): # type: () -> list
69 """
70 Finds all files to add to the tarball
71 """
72 excluded = self.find_excluded_files()
73 to_add = []
74
75 for include in self._module.includes:
76 for file in include.elements:
77 if "__pycache__" in str(file):
78 continue
79
80 if file.is_dir():
81 continue
82
83 file = file.relative_to(self._path)
84
85 if file in excluded and isinstance(include, PackageInclude):
86 continue
87
88 if file.suffix == ".pyc":
89 continue
90
91 if file in to_add:
92 # Skip duplicates
93 continue
94
95 self._io.writeln(
96 " - Adding: <comment>{}</comment>".format(str(file)),
97 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
98 )
99 to_add.append(file)
100
101 # Include project files
102 self._io.writeln(
103 " - Adding: <comment>pyproject.toml</comment>",
104 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
105 )
106 to_add.append(Path("pyproject.toml"))
107
108 # If a license file exists, add it
109 for license_file in self._path.glob("LICENSE*"):
110 self._io.writeln(
111 " - Adding: <comment>{}</comment>".format(
112 license_file.relative_to(self._path)
113 ),
114 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
115 )
116 to_add.append(license_file.relative_to(self._path))
117
118 # If a README is specificed we need to include it
119 # to avoid errors
120 if "readme" in self._poetry.local_config:
121 readme = self._path / self._poetry.local_config["readme"]
122 if readme.exists():
123 self._io.writeln(
124 " - Adding: <comment>{}</comment>".format(
125 readme.relative_to(self._path)
126 ),
127 verbosity=self._io.VERBOSITY_VERY_VERBOSE,
128 )
129 to_add.append(readme.relative_to(self._path))
130
131 # If a build script is specified and explicitely required
132 # we add it to the list of files
133 if self._package.build and not exclude_build:
134 to_add.append(Path(self._package.build))
135
136 return sorted(to_add)
137
138 def convert_entry_points(self): # type: () -> dict
139 result = defaultdict(list)
140
141 # Scripts -> Entry points
142 for name, ep in self._poetry.local_config.get("scripts", {}).items():
143 extras = ""
144 if isinstance(ep, dict):
145 extras = "[{}]".format(", ".join(ep["extras"]))
146 ep = ep["callable"]
147
148 result["console_scripts"].append("{} = {}{}".format(name, ep, extras))
149
150 # Plugins -> entry points
151 plugins = self._poetry.local_config.get("plugins", {})
152 for groupname, group in plugins.items():
153 for name, ep in sorted(group.items()):
154 result[groupname].append("{} = {}".format(name, ep))
155
156 for groupname in result:
157 result[groupname] = sorted(result[groupname])
158
159 return dict(result)
160
161 @classmethod
162 def convert_author(cls, author): # type: () -> dict
163 m = AUTHOR_REGEX.match(author)
164
165 name = m.group("name")
166 email = m.group("email")
167
168 return {"name": name, "email": email}
169
170 @classmethod
171 @contextmanager
172 def temporary_directory(cls, *args, **kwargs):
173 try:
174 from tempfile import TemporaryDirectory
175
176 with TemporaryDirectory(*args, **kwargs) as name:
177 yield name
178 except ImportError:
179 name = tempfile.mkdtemp(*args, **kwargs)
180
181 yield name
182
183 shutil.rmtree(name)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/masonry/builders/builder.py b/poetry/masonry/builders/builder.py
--- a/poetry/masonry/builders/builder.py
+++ b/poetry/masonry/builders/builder.py
@@ -63,7 +63,10 @@
result.append(file)
- return result
+ # The list of excluded files might be big and we will do a lot
+ # containment check (x in excluded).
+ # Returning a set make those tests much much faster.
+ return set(result)
def find_files_to_add(self, exclude_build=True): # type: () -> list
"""
| {"golden_diff": "diff --git a/poetry/masonry/builders/builder.py b/poetry/masonry/builders/builder.py\n--- a/poetry/masonry/builders/builder.py\n+++ b/poetry/masonry/builders/builder.py\n@@ -63,7 +63,10 @@\n \n result.append(file)\n \n- return result\n+ # The list of excluded files might be big and we will do a lot\n+ # containment check (x in excluded).\n+ # Returning a set make those tests much much faster.\n+ return set(result)\n \n def find_files_to_add(self, exclude_build=True): # type: () -> list\n \"\"\"\n", "issue": "Building sdist is slow with wide exclude pattern\n<!--\r\n Hi there! Thank you for wanting to make Poetry better.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n## Issue\r\n\r\nI'm working on a project with the following `pyproject.toml`: https://gist.github.com/Lothiraldan/2c0b5ce0171e8450490e3b493e7c2960 and I want to ship a React project inside my package.\r\n\r\nInclude and exclude are working great, far easier to configure than `MANIFEST.IN` IMHO.\r\n\r\nMy issue is that the builder code is walking all of the directories and for each file check that it's not in the excluded list. One of my exclude pattern is `\"balto/web_interfaces/balto_react/node_modules/**/*\"` which generates a lot of matching files. The length of the excluded file list is `28761` in my case.\r\n\r\nThis makes the following line https://github.com/sdispater/poetry/blob/master/poetry/masonry/builders/sdist.py#L281 quite slow. 
A build takes about 4 minutes on my laptop.\r\n\r\nHere is a `py-spy` dump of the process:\r\n```\r\nCollecting samples from 'pid: 31302' (python v3.6.6)\r\nTotal Samples 5100\r\nGIL: 0.00%, Active: 95.50%, Threads: 1\r\n\r\n %Own %Total OwnTime TotalTime Function (filename:line) \r\n 38.00% 51.00% 10.55s 14.14s __eq__ (4/python3.6/pathlib.py:736)\r\n 29.00% 93.00% 6.64s 24.87s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:281)\r\n 16.50% 16.50% 4.38s 4.38s _cparts (4/python3.6/pathlib.py:728)\r\n 7.50% 11.00% 2.81s 3.65s __eq__ (4/python3.6/pathlib.py:734)\r\n 1.50% 1.50% 0.015s 0.015s run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:104)\r\n 1.00% 1.00% 0.130s 0.130s _cparts (4/python3.6/pathlib.py:724)\r\n 1.00% 2.00% 0.315s 0.435s __eq__ (4/python3.6/pathlib.py:733)\r\n 0.50% 0.50% 0.025s 0.035s parse_parts (4/python3.6/pathlib.py:87)\r\n 0.50% 0.50% 0.165s 0.180s wrapped (4/python3.6/pathlib.py:387)\r\n 0.00% 1.00% 0.000s 0.355s find_packages (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:277)\r\n 0.00% 0.00% 0.030s 0.030s _get_sep (4/python3.6/posixpath.py:45)\r\n 0.00% 94.00% 0.000s 25.36s execute (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/commands/command.py:107)\r\n 0.00% 0.00% 0.010s 0.075s <listcomp> (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builders/sdist.py:276)\r\n 0.00% 0.00% 0.025s 0.025s _select_from (4/python3.6/pathlib.py:529)\r\n 0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/cleo/application.py:197)\r\n 0.00% 94.00% 0.000s 25.36s build (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/masonry/builder.py:21)\r\n 0.00% 94.00% 0.000s 25.36s do_run (/home/lothiraldan/.local/pipx/venvs/poetry/lib64/python3.6/site-packages/poetry/console/application.py:88)\r\n\r\nPress Control-C to quit, or ? 
for help.\r\n```\r\n\r\nI have some ideas about how to make it faster, I will send some patches if that's okay.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport tempfile\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom poetry.utils._compat import Path\nfrom poetry.vcs import get_vcs\n\nfrom ..metadata import Metadata\nfrom ..utils.module import Module\nfrom ..utils.package_include import PackageInclude\n\n\nAUTHOR_REGEX = re.compile(r\"(?u)^(?P<name>[- .,\\w\\d'\u2019\\\"()]+) <(?P<email>.+?)>$\")\n\n\nclass Builder(object):\n\n AVAILABLE_PYTHONS = {\"2\", \"2.7\", \"3\", \"3.4\", \"3.5\", \"3.6\", \"3.7\"}\n\n def __init__(self, poetry, env, io):\n self._poetry = poetry\n self._env = env\n self._io = io\n self._package = poetry.package\n self._path = poetry.file.parent\n self._module = Module(\n self._package.name,\n self._path.as_posix(),\n packages=self._package.packages,\n includes=self._package.include,\n )\n self._meta = Metadata.from_package(self._package)\n\n def build(self):\n raise NotImplementedError()\n\n def find_excluded_files(self): # type: () -> list\n # Checking VCS\n vcs = get_vcs(self._path)\n if not vcs:\n vcs_ignored_files = []\n else:\n vcs_ignored_files = vcs.get_ignored_files()\n\n explicitely_excluded = []\n for excluded_glob in self._package.exclude:\n for excluded in self._path.glob(excluded_glob):\n explicitely_excluded.append(excluded)\n\n ignored = vcs_ignored_files + explicitely_excluded\n result = []\n for file in ignored:\n try:\n file = Path(file).absolute().relative_to(self._path)\n except ValueError:\n # Should only happen in tests\n continue\n\n result.append(file)\n\n return result\n\n def find_files_to_add(self, exclude_build=True): # type: () -> list\n \"\"\"\n Finds all files to add to the tarball\n \"\"\"\n excluded = self.find_excluded_files()\n to_add = []\n\n for include in self._module.includes:\n for file in include.elements:\n if \"__pycache__\" in str(file):\n continue\n\n if file.is_dir():\n continue\n\n file = file.relative_to(self._path)\n\n if file in excluded and isinstance(include, PackageInclude):\n continue\n\n if file.suffix == \".pyc\":\n continue\n\n if file in to_add:\n # Skip duplicates\n continue\n\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(str(file)),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(file)\n\n # Include project files\n self._io.writeln(\n \" - Adding: <comment>pyproject.toml</comment>\",\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(Path(\"pyproject.toml\"))\n\n # If a license file exists, add it\n for license_file in self._path.glob(\"LICENSE*\"):\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n license_file.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(license_file.relative_to(self._path))\n\n # If a README is specificed we need to include it\n # to avoid errors\n if \"readme\" in self._poetry.local_config:\n readme = self._path / self._poetry.local_config[\"readme\"]\n if readme.exists():\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n readme.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(readme.relative_to(self._path))\n\n # If a build script is specified and explicitely required\n # we add it to the list of files\n if self._package.build and not exclude_build:\n to_add.append(Path(self._package.build))\n\n return 
sorted(to_add)\n\n def convert_entry_points(self): # type: () -> dict\n result = defaultdict(list)\n\n # Scripts -> Entry points\n for name, ep in self._poetry.local_config.get(\"scripts\", {}).items():\n extras = \"\"\n if isinstance(ep, dict):\n extras = \"[{}]\".format(\", \".join(ep[\"extras\"]))\n ep = ep[\"callable\"]\n\n result[\"console_scripts\"].append(\"{} = {}{}\".format(name, ep, extras))\n\n # Plugins -> entry points\n plugins = self._poetry.local_config.get(\"plugins\", {})\n for groupname, group in plugins.items():\n for name, ep in sorted(group.items()):\n result[groupname].append(\"{} = {}\".format(name, ep))\n\n for groupname in result:\n result[groupname] = sorted(result[groupname])\n\n return dict(result)\n\n @classmethod\n def convert_author(cls, author): # type: () -> dict\n m = AUTHOR_REGEX.match(author)\n\n name = m.group(\"name\")\n email = m.group(\"email\")\n\n return {\"name\": name, \"email\": email}\n\n @classmethod\n @contextmanager\n def temporary_directory(cls, *args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n", "path": "poetry/masonry/builders/builder.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport re\nimport shutil\nimport tempfile\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom poetry.utils._compat import Path\nfrom poetry.vcs import get_vcs\n\nfrom ..metadata import Metadata\nfrom ..utils.module import Module\nfrom ..utils.package_include import PackageInclude\n\n\nAUTHOR_REGEX = re.compile(r\"(?u)^(?P<name>[- .,\\w\\d'\u2019\\\"()]+) <(?P<email>.+?)>$\")\n\n\nclass Builder(object):\n\n AVAILABLE_PYTHONS = {\"2\", \"2.7\", \"3\", \"3.4\", \"3.5\", \"3.6\", \"3.7\"}\n\n def __init__(self, poetry, env, io):\n self._poetry = poetry\n self._env = env\n self._io = io\n self._package = poetry.package\n self._path = poetry.file.parent\n self._module = Module(\n self._package.name,\n self._path.as_posix(),\n packages=self._package.packages,\n includes=self._package.include,\n )\n self._meta = Metadata.from_package(self._package)\n\n def build(self):\n raise NotImplementedError()\n\n def find_excluded_files(self): # type: () -> list\n # Checking VCS\n vcs = get_vcs(self._path)\n if not vcs:\n vcs_ignored_files = []\n else:\n vcs_ignored_files = vcs.get_ignored_files()\n\n explicitely_excluded = []\n for excluded_glob in self._package.exclude:\n for excluded in self._path.glob(excluded_glob):\n explicitely_excluded.append(excluded)\n\n ignored = vcs_ignored_files + explicitely_excluded\n result = []\n for file in ignored:\n try:\n file = Path(file).absolute().relative_to(self._path)\n except ValueError:\n # Should only happen in tests\n continue\n\n result.append(file)\n\n # The list of excluded files might be big and we will do a lot\n # containment check (x in excluded).\n # Returning a set make those tests much much faster.\n return set(result)\n\n def find_files_to_add(self, exclude_build=True): # type: () -> list\n \"\"\"\n Finds all files to add to the tarball\n \"\"\"\n excluded = self.find_excluded_files()\n to_add = []\n\n for include in self._module.includes:\n for file in include.elements:\n if \"__pycache__\" in str(file):\n continue\n\n if file.is_dir():\n continue\n\n file = file.relative_to(self._path)\n\n if file in excluded and isinstance(include, PackageInclude):\n continue\n\n if 
file.suffix == \".pyc\":\n continue\n\n if file in to_add:\n # Skip duplicates\n continue\n\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(str(file)),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(file)\n\n # Include project files\n self._io.writeln(\n \" - Adding: <comment>pyproject.toml</comment>\",\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(Path(\"pyproject.toml\"))\n\n # If a license file exists, add it\n for license_file in self._path.glob(\"LICENSE*\"):\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n license_file.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(license_file.relative_to(self._path))\n\n # If a README is specificed we need to include it\n # to avoid errors\n if \"readme\" in self._poetry.local_config:\n readme = self._path / self._poetry.local_config[\"readme\"]\n if readme.exists():\n self._io.writeln(\n \" - Adding: <comment>{}</comment>\".format(\n readme.relative_to(self._path)\n ),\n verbosity=self._io.VERBOSITY_VERY_VERBOSE,\n )\n to_add.append(readme.relative_to(self._path))\n\n # If a build script is specified and explicitely required\n # we add it to the list of files\n if self._package.build and not exclude_build:\n to_add.append(Path(self._package.build))\n\n return sorted(to_add)\n\n def convert_entry_points(self): # type: () -> dict\n result = defaultdict(list)\n\n # Scripts -> Entry points\n for name, ep in self._poetry.local_config.get(\"scripts\", {}).items():\n extras = \"\"\n if isinstance(ep, dict):\n extras = \"[{}]\".format(\", \".join(ep[\"extras\"]))\n ep = ep[\"callable\"]\n\n result[\"console_scripts\"].append(\"{} = {}{}\".format(name, ep, extras))\n\n # Plugins -> entry points\n plugins = self._poetry.local_config.get(\"plugins\", {})\n for groupname, group in plugins.items():\n for name, ep in sorted(group.items()):\n result[groupname].append(\"{} = {}\".format(name, ep))\n\n for groupname in result:\n result[groupname] = sorted(result[groupname])\n\n return dict(result)\n\n @classmethod\n def convert_author(cls, author): # type: () -> dict\n m = AUTHOR_REGEX.match(author)\n\n name = m.group(\"name\")\n email = m.group(\"email\")\n\n return {\"name\": name, \"email\": email}\n\n @classmethod\n @contextmanager\n def temporary_directory(cls, *args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n", "path": "poetry/masonry/builders/builder.py"}]} | 3,425 | 147 |
gh_patches_debug_32578 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tornado <6.0 still tries to instrument (and fails with cryptic warning)
Flower version is flower==0.9.3
elastic-apm==6.7.2
The Flower service is not working with elastic-apm==6.7.2. 
Can you please suggest which version is compatible with flower==0.9.3?
--- END ISSUE ---
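To make the intended behavior concrete before looking at the files: the report implies that the agent should detect an unsupported Tornado and skip its hooks instead of failing with a confusing warning. A minimal sketch of such a guard follows; the class and helper names are illustrative rather than the agent's actual API, and only `tornado.version_info` is taken from Tornado itself.

```python
import logging

logger = logging.getLogger("elasticapm.instrument")


class VersionGuardedInstrumentation:
    """Illustrative base class: bail out instead of wrapping when Tornado is too old."""

    name = "tornado_request_execute"

    def instrument(self):
        try:
            import tornado
        except ImportError:
            return  # Tornado is not installed, so there is nothing to wrap
        if tornado.version_info[0] < 6:
            logger.debug(
                "Skipping instrumentation of %s: Tornado < 6.0 is not supported", self.name
            )
            return
        self._wrap_handlers()

    def _wrap_handlers(self):
        # Hypothetical hook where the real wrapping of RequestHandler methods would go.
        pass
```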
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/tornado.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """
31 Instrumentation for Tornado
32 """
33 import elasticapm
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
36 from elasticapm.traces import capture_span
37 from elasticapm.utils.disttracing import TraceParent
38
39
40 class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
41 name = "tornado_request_execute"
42 creates_transactions = True
43 instrument_list = [("tornado.web", "RequestHandler._execute")]
44
45 async def call(self, module, method, wrapped, instance, args, kwargs):
46 if not hasattr(instance.application, "elasticapm_client"):
47 # If tornado was instrumented but not as the main framework
48 # (i.e. in Flower), we should skip it.
49 return await wrapped(*args, **kwargs)
50
51 # Late import to avoid ImportErrors
52 from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
53
54 request = instance.request
55 client = instance.application.elasticapm_client
56 should_ignore = client.should_ignore_url(request.path)
57 if not should_ignore:
58 trace_parent = TraceParent.from_headers(request.headers)
59 client.begin_transaction("request", trace_parent=trace_parent)
60 elasticapm.set_context(
61 lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request"
62 )
63 # TODO: Can we somehow incorporate the routing rule itself here?
64 elasticapm.set_transaction_name("{} {}".format(request.method, type(instance).__name__), override=False)
65
66 ret = await wrapped(*args, **kwargs)
67
68 if not should_ignore:
69 elasticapm.set_context(
70 lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), "response"
71 )
72 status = instance.get_status()
73 result = "HTTP {}xx".format(status // 100)
74 elasticapm.set_transaction_result(result, override=False)
75 elasticapm.set_transaction_outcome(http_status_code=status)
76 client.end_transaction()
77
78 return ret
79
80
81 class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
82 name = "tornado_handle_request_exception"
83
84 instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
85
86 def call(self, module, method, wrapped, instance, args, kwargs):
87 if not hasattr(instance.application, "elasticapm_client"):
88 # If tornado was instrumented but not as the main framework
89 # (i.e. in Flower), we should skip it.
90 return wrapped(*args, **kwargs)
91
92 # Late import to avoid ImportErrors
93 from tornado.web import Finish, HTTPError
94
95 from elasticapm.contrib.tornado.utils import get_data_from_request
96
97 e = args[0]
98 if isinstance(e, Finish):
99 # Not an error; Finish is an exception that ends a request without an error response
100 return wrapped(*args, **kwargs)
101
102 client = instance.application.elasticapm_client
103 request = instance.request
104 client.capture_exception(
105 context={"request": get_data_from_request(instance, request, client.config, constants.ERROR)}
106 )
107 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)
108 if isinstance(e, HTTPError):
109 elasticapm.set_transaction_result("HTTP {}xx".format(int(e.status_code / 100)), override=False)
110 elasticapm.set_context({"status_code": e.status_code}, "response")
111 else:
112 elasticapm.set_transaction_result("HTTP 5xx", override=False)
113 elasticapm.set_context({"status_code": 500}, "response")
114
115 return wrapped(*args, **kwargs)
116
117
118 class TornadoRenderInstrumentation(AbstractInstrumentedModule):
119 name = "tornado_render"
120
121 instrument_list = [("tornado.web", "RequestHandler.render")]
122
123 def call(self, module, method, wrapped, instance, args, kwargs):
124 if "template_name" in kwargs:
125 name = kwargs["template_name"]
126 else:
127 name = args[0]
128
129 with capture_span(name, span_type="template", span_subtype="tornado", span_action="render"):
130 return wrapped(*args, **kwargs)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py
--- a/elasticapm/instrumentation/packages/tornado.py
+++ b/elasticapm/instrumentation/packages/tornado.py
@@ -35,9 +35,25 @@
from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
from elasticapm.traces import capture_span
from elasticapm.utils.disttracing import TraceParent
+from elasticapm.utils.logging import get_logger
+logger = get_logger("elasticapm.instrument")
-class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
+
+class TornadoBaseInstrumentedModule(AbstractInstrumentedModule):
+ def instrument(self):
+ try:
+ import tornado
+
+ if tornado.version_info[0] < 6:
+ logger.debug("Skipping instrumentation of %s. Tornado is only supported with version 6.0+", self.name)
+ return
+ except ImportError:
+ pass
+ super().instrument()
+
+
+class TornadoRequestExecuteInstrumentation(TornadoBaseInstrumentedModule, AsyncAbstractInstrumentedModule):
name = "tornado_request_execute"
creates_transactions = True
instrument_list = [("tornado.web", "RequestHandler._execute")]
@@ -78,7 +94,7 @@
return ret
-class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
+class TornadoHandleRequestExceptionInstrumentation(TornadoBaseInstrumentedModule):
name = "tornado_handle_request_exception"
instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
@@ -115,7 +131,7 @@
return wrapped(*args, **kwargs)
-class TornadoRenderInstrumentation(AbstractInstrumentedModule):
+class TornadoRenderInstrumentation(TornadoBaseInstrumentedModule):
name = "tornado_render"
instrument_list = [("tornado.web", "RequestHandler.render")]
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py\n--- a/elasticapm/instrumentation/packages/tornado.py\n+++ b/elasticapm/instrumentation/packages/tornado.py\n@@ -35,9 +35,25 @@\n from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\n from elasticapm.traces import capture_span\n from elasticapm.utils.disttracing import TraceParent\n+from elasticapm.utils.logging import get_logger\n \n+logger = get_logger(\"elasticapm.instrument\")\n \n-class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n+\n+class TornadoBaseInstrumentedModule(AbstractInstrumentedModule):\n+ def instrument(self):\n+ try:\n+ import tornado\n+\n+ if tornado.version_info[0] < 6:\n+ logger.debug(\"Skipping instrumentation of %s. Tornado is only supported with version 6.0+\", self.name)\n+ return\n+ except ImportError:\n+ pass\n+ super().instrument()\n+\n+\n+class TornadoRequestExecuteInstrumentation(TornadoBaseInstrumentedModule, AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n@@ -78,7 +94,7 @@\n return ret\n \n \n-class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n+class TornadoHandleRequestExceptionInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n \n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n@@ -115,7 +131,7 @@\n return wrapped(*args, **kwargs)\n \n \n-class TornadoRenderInstrumentation(AbstractInstrumentedModule):\n+class TornadoRenderInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_render\"\n \n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n", "issue": "Tornado <6.0 still tries to instrument (and fails with cryptic warning)\nFlower version is flower==0.9.3\r\nelastic-apm==6.7.2\r\n\r\nFlower service is not working for elastic-apm==6.7.2. \r\nCan you please suggest which version is compatible for flower==0.9.3.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nInstrumentation for Tornado\n\"\"\"\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. in Flower), we should skip it.\n return await wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n\n request = instance.request\n client = instance.application.elasticapm_client\n should_ignore = client.should_ignore_url(request.path)\n if not should_ignore:\n trace_parent = TraceParent.from_headers(request.headers)\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n elasticapm.set_context(\n lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), \"request\"\n )\n # TODO: Can we somehow incorporate the routing rule itself here?\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, type(instance).__name__), override=False)\n\n ret = await wrapped(*args, **kwargs)\n\n if not should_ignore:\n elasticapm.set_context(\n lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), \"response\"\n )\n status = instance.get_status()\n result = \"HTTP {}xx\".format(status // 100)\n elasticapm.set_transaction_result(result, override=False)\n elasticapm.set_transaction_outcome(http_status_code=status)\n client.end_transaction()\n\n return ret\n\n\nclass TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. 
in Flower), we should skip it.\n return wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n\n from elasticapm.contrib.tornado.utils import get_data_from_request\n\n e = args[0]\n if isinstance(e, Finish):\n # Not an error; Finish is an exception that ends a request without an error response\n return wrapped(*args, **kwargs)\n\n client = instance.application.elasticapm_client\n request = instance.request\n client.capture_exception(\n context={\"request\": get_data_from_request(instance, request, client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)\n if isinstance(e, HTTPError):\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(int(e.status_code / 100)), override=False)\n elasticapm.set_context({\"status_code\": e.status_code}, \"response\")\n else:\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n return wrapped(*args, **kwargs)\n\n\nclass TornadoRenderInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_render\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"template_name\" in kwargs:\n name = kwargs[\"template_name\"]\n else:\n name = args[0]\n\n with capture_span(name, span_type=\"template\", span_subtype=\"tornado\", span_action=\"render\"):\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/tornado.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nInstrumentation for Tornado\n\"\"\"\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.instrument\")\n\n\nclass TornadoBaseInstrumentedModule(AbstractInstrumentedModule):\n def instrument(self):\n try:\n import tornado\n\n if tornado.version_info[0] < 6:\n logger.debug(\"Skipping instrumentation of %s. Tornado is only supported with version 6.0+\", self.name)\n return\n except ImportError:\n pass\n super().instrument()\n\n\nclass TornadoRequestExecuteInstrumentation(TornadoBaseInstrumentedModule, AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. in Flower), we should skip it.\n return await wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n\n request = instance.request\n client = instance.application.elasticapm_client\n should_ignore = client.should_ignore_url(request.path)\n if not should_ignore:\n trace_parent = TraceParent.from_headers(request.headers)\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n elasticapm.set_context(\n lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), \"request\"\n )\n # TODO: Can we somehow incorporate the routing rule itself here?\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, type(instance).__name__), override=False)\n\n ret = await wrapped(*args, **kwargs)\n\n if not should_ignore:\n elasticapm.set_context(\n lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), \"response\"\n )\n status = instance.get_status()\n result = \"HTTP {}xx\".format(status // 100)\n elasticapm.set_transaction_result(result, override=False)\n elasticapm.set_transaction_outcome(http_status_code=status)\n client.end_transaction()\n\n return ret\n\n\nclass TornadoHandleRequestExceptionInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if not hasattr(instance.application, \"elasticapm_client\"):\n # If tornado was instrumented but not as the main framework\n # (i.e. 
in Flower), we should skip it.\n return wrapped(*args, **kwargs)\n\n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n\n from elasticapm.contrib.tornado.utils import get_data_from_request\n\n e = args[0]\n if isinstance(e, Finish):\n # Not an error; Finish is an exception that ends a request without an error response\n return wrapped(*args, **kwargs)\n\n client = instance.application.elasticapm_client\n request = instance.request\n client.capture_exception(\n context={\"request\": get_data_from_request(instance, request, client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE)\n if isinstance(e, HTTPError):\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(int(e.status_code / 100)), override=False)\n elasticapm.set_context({\"status_code\": e.status_code}, \"response\")\n else:\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n return wrapped(*args, **kwargs)\n\n\nclass TornadoRenderInstrumentation(TornadoBaseInstrumentedModule):\n name = \"tornado_render\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"template_name\" in kwargs:\n name = kwargs[\"template_name\"]\n else:\n name = args[0]\n\n with capture_span(name, span_type=\"template\", span_subtype=\"tornado\", span_action=\"render\"):\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/tornado.py"}]} | 1,863 | 438 |
gh_patches_debug_56182 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0
* Cookiecutter version: 1.7.3
* Template project url: -
* Python version: 3.9.5
* Operating System: macOS Catalina 10.15.7
### Description:
Apparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json
cookiecutter.json example:
```json
{
"project_name": "",
"project_policy": {"project_policy_example": "yes"}
}
```
```
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
```
Looking closer at the `cookiecutter.prompt` module, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing the actual default value from cookiecutter.json as it did in `click 7.1.2`. 
Link to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81

As far as I can tell, this issue could have been introduced by this PR: https://github.com/pallets/click/pull/1517/
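To see where the error message comes from: the sentinel string ends up in the JSON decoder, which naturally cannot parse it. Here is a simplified stand-in for that decode step (not cookiecutter's actual `process_json`, which also enforces a dict result and raises `click.UsageError`):

```python
import json


def process_json(user_value):
    # Simplified: decode the prompt answer, mirroring the failure shown above.
    try:
        return json.loads(user_value)
    except json.JSONDecodeError as exc:
        raise ValueError("Unable to decode to JSON.") from exc


print(process_json('{"project_policy_example": "yes"}'))  # ok: a dict comes back

try:
    # Under click 8.0.0 the literal string 'default' reaches the converter:
    process_json("default")
except ValueError as exc:
    print(exc)  # Unable to decode to JSON.
```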
### Quick local fix
Install click first and specify a version older than 8.0.0:
```
pip install click==7.1.2
pip install cookiecutter
```
### Quick fix for cookiecutter library
In `setup.py`, replace `'click>=7.0'` with `'click>=7,<8.0.0'`.
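For reference, the `requirements` list in `setup.py` (shown in full further below) would then read roughly like this; this is a sketch only, and the exact specifier is a maintainer decision:

```python
requirements = [
    'binaryornot>=0.4.4',
    'Jinja2>=2.7,<4.0.0',
    'click>=7,<8.0.0',  # cap click until the 8.x prompt behaviour is handled
    'pyyaml>=5.3.1',
    'jinja2-time>=0.2.0',
    'python-slugify>=4.0.0',
    'requests>=2.23.0',
]
```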
### What I've run:
```shell
% python3.9 -m venv test39
% source test39/bin/activate
% python -V
Python 3.9.5
% python -m pip install click==7.1.2
Collecting click==7.1.2
Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)
Installing collected packages: click
Successfully installed click-7.1.2
(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter
Collecting cookiecutter
Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)
Collecting six>=1.10
................................................
Installing collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter
Successfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4
% python -m cookiecutter ../Projects/project-configs
project_name []: t
project_policy [default]:
% ls t
Makefile README.md t tests
% rm -rf t
% python -m pip install click==8.0.0
Collecting click==8.0.0
Using cached click-8.0.0-py3-none-any.whl (96 kB)
Installing collected packages: click
Attempting uninstall: click
Found existing installation: click 7.1.2
Uninstalling click-7.1.2:
Successfully uninstalled click-7.1.2
Successfully installed click-8.0.0
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
project_policy [default]:
Error: Unable to decode to JSON.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """cookiecutter distutils configuration."""
3 from setuptools import setup
4
5 version = "2.0.0"
6
7 with open('README.md', encoding='utf-8') as readme_file:
8 readme = readme_file.read()
9
10 requirements = [
11 'binaryornot>=0.4.4',
12 'Jinja2>=2.7,<4.0.0',
13 'click>=7.0',
14 'pyyaml>=5.3.1',
15 'jinja2-time>=0.2.0',
16 'python-slugify>=4.0.0',
17 'requests>=2.23.0',
18 ]
19
20 setup(
21 name='cookiecutter',
22 version=version,
23 description=(
24 'A command-line utility that creates projects from project '
25 'templates, e.g. creating a Python package project from a '
26 'Python package project template.'
27 ),
28 long_description=readme,
29 long_description_content_type='text/markdown',
30 author='Audrey Feldroy',
31 author_email='[email protected]',
32 url='https://github.com/cookiecutter/cookiecutter',
33 packages=['cookiecutter'],
34 package_dir={'cookiecutter': 'cookiecutter'},
35 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
36 include_package_data=True,
37 python_requires='>=3.6',
38 install_requires=requirements,
39 license='BSD',
40 zip_safe=False,
41 classifiers=[
42 "Development Status :: 5 - Production/Stable",
43 "Environment :: Console",
44 "Intended Audience :: Developers",
45 "Natural Language :: English",
46 "License :: OSI Approved :: BSD License",
47 "Programming Language :: Python :: 3 :: Only",
48 "Programming Language :: Python :: 3",
49 "Programming Language :: Python :: 3.6",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: 3.8",
52 "Programming Language :: Python :: 3.9",
53 "Programming Language :: Python :: Implementation :: CPython",
54 "Programming Language :: Python :: Implementation :: PyPy",
55 "Programming Language :: Python",
56 "Topic :: Software Development",
57 ],
58 keywords=[
59 "cookiecutter",
60 "Python",
61 "projects",
62 "project templates",
63 "Jinja2",
64 "skeleton",
65 "scaffolding",
66 "project directory",
67 "package",
68 "packaging",
69 ],
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
requirements = [
'binaryornot>=0.4.4',
'Jinja2>=2.7,<4.0.0',
- 'click>=7.0',
+ 'click>=7.0,<8.0.0',
'pyyaml>=5.3.1',
'jinja2-time>=0.2.0',
'python-slugify>=4.0.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n requirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n- 'click>=7.0',\n+ 'click>=7.0,<8.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n", "issue": "prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0\n* Cookiecutter version: 1.7.3\r\n* Template project url: -\r\n* Python version: 3.9.5\r\n* Operating System: macOS Catalina 10.15.7\r\n\r\n### Description:\r\n\r\nApparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json\r\ncookiecutter.json example:\r\n```json\r\n{\r\n \"project_name\": \"\",\r\n \"project_policy\": {\"project_policy_example\": \"yes\"}\r\n}\r\n```\r\n \r\n```\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\r\n\r\nLooking closer at the cookiecutter.promt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it was in `click 7.1.2`. \r\nLink to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81\r\n\r\n\r\nAs far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/\r\n\r\n### Quick local fix\r\nInstall click first and specify version older than 8.0.0\r\n```\r\npip install click==7.1.2\r\npip install cookiecutter\r\n```\r\n\r\n### Quick fix for cookiecutter library\r\nin `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'`\r\n\r\n### What I've run:\r\n\r\n```shell\r\n% python3.9 -m venv test39 \r\n \r\n% source test39/bin/activate\r\n\r\n% python -V\r\nPython 3.9.5\r\n\r\n\r\n% python -m pip install click==7.1.2\r\nCollecting click==7.1.2\r\n Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)\r\nInstalling collected packages: click\r\nSuccessfully installed click-7.1.2\r\n(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter\r\nCollecting cookiecutter\r\n Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)\r\nCollecting six>=1.10\r\n................................................\r\nInstalling collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter\r\nSuccessfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\nproject_name []: t\r\nproject_policy [default]: \r\n\r\n% ls t \r\nMakefile README.md t tests\r\n\r\n% rm -rf t\r\n\r\n% python -m pip install click==8.0.0 \r\nCollecting click==8.0.0\r\n Using cached click-8.0.0-py3-none-any.whl (96 kB)\r\nInstalling collected packages: click\r\n Attempting uninstall: click\r\n Found existing installation: click 7.1.2\r\n Uninstalling click-7.1.2:\r\n Successfully uninstalled click-7.1.2\r\nSuccessfully installed click-8.0.0\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode 
to JSON.\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.0.0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.6',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"cookiecutter distutils configuration.\"\"\"\nfrom setuptools import setup\n\nversion = \"2.0.0\"\n\nwith open('README.md', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<8.0.0',\n 'pyyaml>=5.3.1',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n]\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Feldroy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=3.6',\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n keywords=[\n \"cookiecutter\",\n \"Python\",\n \"projects\",\n \"project templates\",\n \"Jinja2\",\n \"skeleton\",\n \"scaffolding\",\n \"project directory\",\n \"package\",\n \"packaging\",\n ],\n)\n", "path": "setup.py"}]} | 1,960 | 124 |
gh_patches_debug_3807 | rasdani/github-patches | git_diff | quantumlib__Cirq-3574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs build is failing
Since the black formatter merge, the RTD builds are failing with some weird pip error:
https://readthedocs.org/projects/cirq/builds/
Need to look into it and resolve it if the error is on our end or report it to the RTD team if it's on their end.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17 from setuptools import find_packages, setup
18
19 # This reads the __version__ variable from cirq/_version.py
20 __version__ = ''
21 exec(open('cirq/_version.py').read())
22
23 name = 'cirq'
24
25 description = (
26 'A framework for creating, editing, and invoking '
27 'Noisy Intermediate Scale Quantum (NISQ) circuits.'
28 )
29
30 # README file as long_description.
31 long_description = io.open('README.rst', encoding='utf-8').read()
32
33 # If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.
34 # It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence
35 # it will be a pre-release version on PyPi. See
36 # https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning
37 # for more details.
38 if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:
39 __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']
40 long_description = (
41 "**This is a development version of Cirq and may be "
42 "unstable.**\n\n**For the latest stable release of Cirq "
43 "see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description
44 )
45
46 # Read in requirements
47 requirements = open('requirements.txt').readlines()
48 requirements = [r.strip() for r in requirements]
49 contrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()
50 contrib_requirements = [r.strip() for r in contrib_requirements]
51 dev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()
52 dev_requirements = [r.strip() for r in dev_requirements]
53
54 cirq_packages = ['cirq'] + ['cirq.' + package for package in find_packages(where='cirq')]
55
56 # Sanity check
57 assert __version__, 'Version string cannot be empty'
58
59 setup(
60 name=name,
61 version=__version__,
62 url='http://github.com/quantumlib/cirq',
63 author='The Cirq Developers',
64 author_email='[email protected]',
65 python_requires=('>=3.6.0'),
66 install_requires=requirements,
67 extras_require={
68 'contrib': contrib_requirements,
69 'dev_env': dev_requirements + contrib_requirements,
70 },
71 license='Apache 2',
72 description=description,
73 long_description=long_description,
74 packages=cirq_packages,
75 package_data={
76 'cirq': ['py.typed'],
77 'cirq.google.api.v1': ['*.proto', '*.pyi'],
78 'cirq.google.api.v2': ['*.proto', '*.pyi'],
79 'cirq.protocols.json_test_data': ['*'],
80 },
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,7 @@
url='http://github.com/quantumlib/cirq',
author='The Cirq Developers',
author_email='[email protected]',
- python_requires=('>=3.6.0'),
+ python_requires=('>=3.7.0'),
install_requires=requirements,
extras_require={
'contrib': contrib_requirements,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,7 +62,7 @@\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n- python_requires=('>=3.6.0'),\n+ python_requires=('>=3.7.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n", "issue": "Docs build is failing\nSince the black formatter merge the RTD builds are failing with some weird pip error:\r\n\r\nhttps://readthedocs.org/projects/cirq/builds/\r\n\r\nNeed to look into it and resolve it if the error is on our end or report it to the RTD team if it's on their end.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\nexec(open('cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\ncontrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()\ncontrib_requirements = [r.strip() for r in contrib_requirements]\ndev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()\ndev_requirements = [r.strip() for r in dev_requirements]\n\ncirq_packages = ['cirq'] + ['cirq.' 
+ package for package in find_packages(where='cirq')]\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n 'dev_env': dev_requirements + contrib_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n packages=cirq_packages,\n package_data={\n 'cirq': ['py.typed'],\n 'cirq.google.api.v1': ['*.proto', '*.pyi'],\n 'cirq.google.api.v2': ['*.proto', '*.pyi'],\n 'cirq.protocols.json_test_data': ['*'],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\nexec(open('cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Read in requirements\nrequirements = open('requirements.txt').readlines()\nrequirements = [r.strip() for r in requirements]\ncontrib_requirements = open('cirq/contrib/contrib-requirements.txt').readlines()\ncontrib_requirements = [r.strip() for r in contrib_requirements]\ndev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()\ndev_requirements = [r.strip() for r in dev_requirements]\n\ncirq_packages = ['cirq'] + ['cirq.' 
+ package for package in find_packages(where='cirq')]\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.7.0'),\n install_requires=requirements,\n extras_require={\n 'contrib': contrib_requirements,\n 'dev_env': dev_requirements + contrib_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n packages=cirq_packages,\n package_data={\n 'cirq': ['py.typed'],\n 'cirq.google.api.v1': ['*.proto', '*.pyi'],\n 'cirq.google.api.v2': ['*.proto', '*.pyi'],\n 'cirq.protocols.json_test_data': ['*'],\n },\n)\n", "path": "setup.py"}]} | 1,207 | 107 |
gh_patches_debug_27198 | rasdani/github-patches | git_diff | python-poetry__poetry-1910 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
poetry complains about missing argument when using `--help`
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
I don't know whether this is a poetry issue or a cleo issue, or whether this problem arises in earlier versions.
When I type `poetry add --help`, I receive the error message:
```
Not enough arguments (missing: "name").
```
Similarly, for `poetry remove --help`:
```
Not enough arguments (missing: "packages").
```
If I append any name, I get the help page.
The expected behavior would be that whenever I use `--help`, the help page is displayed and mandatory arguments for the sub-command aren't checked.
Saw this with versions 1.0.0b6 and 1.0.0b7.
--- END ISSUE ---
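Reading the report, the direction of a fix is clear: when `-h`/`--help` is present, a failure to strictly resolve the command (because a required argument such as `name` is missing) should fall back to the generic help command rather than surfacing the parse error. Below is a compressed sketch of that pattern against the `ApplicationConfig` shown in the files section; the subclass name is made up and this is not necessarily the project's actual patch.

```python
from cleo.config import ApplicationConfig as BaseApplicationConfig
from clikit.api.exceptions import CliKitException
from clikit.api.resolver import ResolvedCommand


class LenientHelpApplicationConfig(BaseApplicationConfig):
    """Sketch: render help even when the command's required arguments are missing."""

    def resolve_help_command(self, event, event_name, dispatcher):
        args = event.raw_args
        application = event.application

        if args.has_option_token("-h") or args.has_option_token("--help"):
            try:
                resolved_command = self.command_resolver.resolve(args, application)
            except CliKitException:
                # e.g. "poetry add --help": resolution fails because "name" is
                # missing, so defer to the default help resolution instead.
                return super(LenientHelpApplicationConfig, self).resolve_help_command(
                    event, event_name, dispatcher
                )

            # Keep the special case for "run": its options belong to the executed command.
            if resolved_command.command.name == "run":
                event.set_resolved_command(resolved_command)
                return event.stop_propagation()

            command = application.get_command("help")
            parsed_args = command.parse(args, True)  # lenient parsing
            event.set_resolved_command(ResolvedCommand(command, parsed_args))
            event.stop_propagation()
```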
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/console/config/application_config.py`
Content:
```
1 import logging
2
3 from typing import Any
4
5 from cleo.config import ApplicationConfig as BaseApplicationConfig
6 from clikit.api.application.application import Application
7 from clikit.api.args.raw_args import RawArgs
8 from clikit.api.event import PRE_HANDLE
9 from clikit.api.event import PreHandleEvent
10 from clikit.api.event import PreResolveEvent
11 from clikit.api.event.event_dispatcher import EventDispatcher
12 from clikit.api.formatter import Style
13 from clikit.api.io import Input
14 from clikit.api.io import InputStream
15 from clikit.api.io import Output
16 from clikit.api.io import OutputStream
17 from clikit.api.io.flags import DEBUG
18 from clikit.api.io.flags import VERBOSE
19 from clikit.api.io.flags import VERY_VERBOSE
20 from clikit.api.io.io import IO
21 from clikit.formatter import AnsiFormatter
22 from clikit.formatter import PlainFormatter
23 from clikit.io.input_stream import StandardInputStream
24 from clikit.io.output_stream import ErrorOutputStream
25 from clikit.io.output_stream import StandardOutputStream
26
27 from poetry.console.commands.command import Command
28 from poetry.console.commands.env_command import EnvCommand
29 from poetry.console.logging.io_formatter import IOFormatter
30 from poetry.console.logging.io_handler import IOHandler
31
32
33 class ApplicationConfig(BaseApplicationConfig):
34 def configure(self):
35 super(ApplicationConfig, self).configure()
36
37 self.add_style(Style("c1").fg("cyan"))
38 self.add_style(Style("info").fg("blue"))
39 self.add_style(Style("comment").fg("green"))
40 self.add_style(Style("error").fg("red").bold())
41 self.add_style(Style("warning").fg("yellow"))
42 self.add_style(Style("debug").fg("black").bold())
43
44 self.add_event_listener(PRE_HANDLE, self.register_command_loggers)
45 self.add_event_listener(PRE_HANDLE, self.set_env)
46
47 def register_command_loggers(
48 self, event, event_name, _
49 ): # type: (PreHandleEvent, str, Any) -> None
50 command = event.command.config.handler
51 if not isinstance(command, Command):
52 return
53
54 io = event.io
55
56 loggers = ["poetry.packages.package", "poetry.utils.password_manager"]
57
58 loggers += command.loggers
59
60 handler = IOHandler(io)
61 handler.setFormatter(IOFormatter())
62
63 for logger in loggers:
64 logger = logging.getLogger(logger)
65
66 logger.handlers = [handler]
67 logger.propagate = False
68
69 level = logging.WARNING
70 if io.is_debug():
71 level = logging.DEBUG
72 elif io.is_very_verbose() or io.is_verbose():
73 level = logging.INFO
74
75 logger.setLevel(level)
76
77 def set_env(self, event, event_name, _): # type: (PreHandleEvent, str, Any) -> None
78 from poetry.utils.env import EnvManager
79
80 command = event.command.config.handler # type: EnvCommand
81 if not isinstance(command, EnvCommand):
82 return
83
84 io = event.io
85 poetry = command.poetry
86
87 env_manager = EnvManager(poetry)
88 env = env_manager.create_venv(io)
89
90 if env.is_venv() and io.is_verbose():
91 io.write_line("Using virtualenv: <comment>{}</>".format(env.path))
92
93 command.set_env(env)
94
95 def resolve_help_command(
96 self, event, event_name, dispatcher
97 ): # type: (PreResolveEvent, str, EventDispatcher) -> None
98 args = event.raw_args
99 application = event.application
100
101 if args.has_option_token("-h") or args.has_option_token("--help"):
102 from clikit.api.resolver import ResolvedCommand
103
104 resolved_command = self.command_resolver.resolve(args, application)
105 # If the current command is the run one, skip option
106 # check and interpret them as part of the executed command
107 if resolved_command.command.name == "run":
108 event.set_resolved_command(resolved_command)
109
110 return event.stop_propagation()
111
112 command = application.get_command("help")
113
114 # Enable lenient parsing
115 parsed_args = command.parse(args, True)
116
117 event.set_resolved_command(ResolvedCommand(command, parsed_args))
118 event.stop_propagation()
119
120 def create_io(
121 self,
122 application,
123 args,
124 input_stream=None,
125 output_stream=None,
126 error_stream=None,
127 ): # type: (Application, RawArgs, InputStream, OutputStream, OutputStream) -> IO
128 if input_stream is None:
129 input_stream = StandardInputStream()
130
131 if output_stream is None:
132 output_stream = StandardOutputStream()
133
134 if error_stream is None:
135 error_stream = ErrorOutputStream()
136
137 style_set = application.config.style_set
138
139 if output_stream.supports_ansi():
140 output_formatter = AnsiFormatter(style_set)
141 else:
142 output_formatter = PlainFormatter(style_set)
143
144 if error_stream.supports_ansi():
145 error_formatter = AnsiFormatter(style_set)
146 else:
147 error_formatter = PlainFormatter(style_set)
148
149 io = self.io_class(
150 Input(input_stream),
151 Output(output_stream, output_formatter),
152 Output(error_stream, error_formatter),
153 )
154
155 resolved_command = application.resolve_command(args)
156 # If the current command is the run one, skip option
157 # check and interpret them as part of the executed command
158 if resolved_command.command.name == "run":
159 return io
160
161 if args.has_option_token("--no-ansi"):
162 formatter = PlainFormatter(style_set)
163 io.output.set_formatter(formatter)
164 io.error_output.set_formatter(formatter)
165 elif args.has_option_token("--ansi"):
166 formatter = AnsiFormatter(style_set, True)
167 io.output.set_formatter(formatter)
168 io.error_output.set_formatter(formatter)
169
170 if args.has_option_token("-vvv") or self.is_debug():
171 io.set_verbosity(DEBUG)
172 elif args.has_option_token("-vv"):
173 io.set_verbosity(VERY_VERBOSE)
174 elif args.has_option_token("-v"):
175 io.set_verbosity(VERBOSE)
176
177 if args.has_option_token("--quiet") or args.has_option_token("-q"):
178 io.set_quiet(True)
179
180 if args.has_option_token("--no-interaction") or args.has_option_token("-n"):
181 io.set_interactive(False)
182
183 return io
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/console/config/application_config.py b/poetry/console/config/application_config.py
--- a/poetry/console/config/application_config.py
+++ b/poetry/console/config/application_config.py
@@ -9,6 +9,7 @@
from clikit.api.event import PreHandleEvent
from clikit.api.event import PreResolveEvent
from clikit.api.event.event_dispatcher import EventDispatcher
+from clikit.api.exceptions import CliKitException
from clikit.api.formatter import Style
from clikit.api.io import Input
from clikit.api.io import InputStream
@@ -101,7 +102,16 @@
if args.has_option_token("-h") or args.has_option_token("--help"):
from clikit.api.resolver import ResolvedCommand
- resolved_command = self.command_resolver.resolve(args, application)
+ try:
+ resolved_command = self.command_resolver.resolve(args, application)
+ except CliKitException:
+ # We weren't able to resolve the command,
+ # due to a parse error most likely,
+ # so we fall back on the default behavior
+ return super(ApplicationConfig, self).resolve_help_command(
+ event, event_name, dispatcher
+ )
+
# If the current command is the run one, skip option
# check and interpret them as part of the executed command
if resolved_command.command.name == "run":
| {"golden_diff": "diff --git a/poetry/console/config/application_config.py b/poetry/console/config/application_config.py\n--- a/poetry/console/config/application_config.py\n+++ b/poetry/console/config/application_config.py\n@@ -9,6 +9,7 @@\n from clikit.api.event import PreHandleEvent\n from clikit.api.event import PreResolveEvent\n from clikit.api.event.event_dispatcher import EventDispatcher\n+from clikit.api.exceptions import CliKitException\n from clikit.api.formatter import Style\n from clikit.api.io import Input\n from clikit.api.io import InputStream\n@@ -101,7 +102,16 @@\n if args.has_option_token(\"-h\") or args.has_option_token(\"--help\"):\n from clikit.api.resolver import ResolvedCommand\n \n- resolved_command = self.command_resolver.resolve(args, application)\n+ try:\n+ resolved_command = self.command_resolver.resolve(args, application)\n+ except CliKitException:\n+ # We weren't able to resolve the command,\n+ # due to a parse error most likely,\n+ # so we fall back on the default behavior\n+ return super(ApplicationConfig, self).resolve_help_command(\n+ event, event_name, dispatcher\n+ )\n+\n # If the current command is the run one, skip option\n # check and interpret them as part of the executed command\n if resolved_command.command.name == \"run\":\n", "issue": "poetry complains about missing argument when using `--help`\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! 
Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nI don't know whether this is a poetry issue or cleo and if this problem arises in earlier versions.\r\n\r\nWhen I type `poetry add --help` I receive the error message\r\n\r\n```\r\nNot enough arguments (missing: \"name\").\r\n```\r\n\r\nSimilar for `poetry remove --help`\r\n\r\n```\r\nNot enough arguments (missing: \"packages\").\r\n```\r\n\r\nIf I append any name I get the help page.\r\n\r\nThe expected behavior would be, that whenever I use `--help`, the help page should be displayed and mandatory arguments for sub command shouldn't be checked.\r\n\r\nSaw this with version 1.0.0b6 and 1.0.0b7\n", "before_files": [{"content": "import logging\n\nfrom typing import Any\n\nfrom cleo.config import ApplicationConfig as BaseApplicationConfig\nfrom clikit.api.application.application import Application\nfrom clikit.api.args.raw_args import RawArgs\nfrom clikit.api.event import PRE_HANDLE\nfrom clikit.api.event import PreHandleEvent\nfrom clikit.api.event import PreResolveEvent\nfrom clikit.api.event.event_dispatcher import EventDispatcher\nfrom clikit.api.formatter import Style\nfrom clikit.api.io import Input\nfrom clikit.api.io import InputStream\nfrom clikit.api.io import Output\nfrom clikit.api.io import OutputStream\nfrom clikit.api.io.flags import DEBUG\nfrom clikit.api.io.flags import VERBOSE\nfrom clikit.api.io.flags import VERY_VERBOSE\nfrom clikit.api.io.io import IO\nfrom clikit.formatter import AnsiFormatter\nfrom clikit.formatter import PlainFormatter\nfrom clikit.io.input_stream import StandardInputStream\nfrom clikit.io.output_stream import ErrorOutputStream\nfrom clikit.io.output_stream import StandardOutputStream\n\nfrom poetry.console.commands.command import Command\nfrom poetry.console.commands.env_command import EnvCommand\nfrom poetry.console.logging.io_formatter import IOFormatter\nfrom poetry.console.logging.io_handler import IOHandler\n\n\nclass ApplicationConfig(BaseApplicationConfig):\n def configure(self):\n super(ApplicationConfig, self).configure()\n\n self.add_style(Style(\"c1\").fg(\"cyan\"))\n self.add_style(Style(\"info\").fg(\"blue\"))\n self.add_style(Style(\"comment\").fg(\"green\"))\n self.add_style(Style(\"error\").fg(\"red\").bold())\n self.add_style(Style(\"warning\").fg(\"yellow\"))\n self.add_style(Style(\"debug\").fg(\"black\").bold())\n\n self.add_event_listener(PRE_HANDLE, self.register_command_loggers)\n self.add_event_listener(PRE_HANDLE, self.set_env)\n\n def register_command_loggers(\n self, event, event_name, _\n ): # type: (PreHandleEvent, str, Any) -> None\n command = event.command.config.handler\n if not isinstance(command, Command):\n return\n\n io = event.io\n\n loggers = [\"poetry.packages.package\", \"poetry.utils.password_manager\"]\n\n loggers += command.loggers\n\n handler = IOHandler(io)\n handler.setFormatter(IOFormatter())\n\n for logger in loggers:\n logger = logging.getLogger(logger)\n\n logger.handlers = [handler]\n logger.propagate = False\n\n level = logging.WARNING\n if io.is_debug():\n level = logging.DEBUG\n elif io.is_very_verbose() or io.is_verbose():\n level = logging.INFO\n\n logger.setLevel(level)\n\n def set_env(self, event, event_name, _): # type: (PreHandleEvent, str, Any) -> None\n from poetry.utils.env import EnvManager\n\n command = event.command.config.handler # type: EnvCommand\n if not isinstance(command, EnvCommand):\n return\n\n io = event.io\n poetry = command.poetry\n\n env_manager = EnvManager(poetry)\n env = env_manager.create_venv(io)\n\n if env.is_venv() 
and io.is_verbose():\n io.write_line(\"Using virtualenv: <comment>{}</>\".format(env.path))\n\n command.set_env(env)\n\n def resolve_help_command(\n self, event, event_name, dispatcher\n ): # type: (PreResolveEvent, str, EventDispatcher) -> None\n args = event.raw_args\n application = event.application\n\n if args.has_option_token(\"-h\") or args.has_option_token(\"--help\"):\n from clikit.api.resolver import ResolvedCommand\n\n resolved_command = self.command_resolver.resolve(args, application)\n # If the current command is the run one, skip option\n # check and interpret them as part of the executed command\n if resolved_command.command.name == \"run\":\n event.set_resolved_command(resolved_command)\n\n return event.stop_propagation()\n\n command = application.get_command(\"help\")\n\n # Enable lenient parsing\n parsed_args = command.parse(args, True)\n\n event.set_resolved_command(ResolvedCommand(command, parsed_args))\n event.stop_propagation()\n\n def create_io(\n self,\n application,\n args,\n input_stream=None,\n output_stream=None,\n error_stream=None,\n ): # type: (Application, RawArgs, InputStream, OutputStream, OutputStream) -> IO\n if input_stream is None:\n input_stream = StandardInputStream()\n\n if output_stream is None:\n output_stream = StandardOutputStream()\n\n if error_stream is None:\n error_stream = ErrorOutputStream()\n\n style_set = application.config.style_set\n\n if output_stream.supports_ansi():\n output_formatter = AnsiFormatter(style_set)\n else:\n output_formatter = PlainFormatter(style_set)\n\n if error_stream.supports_ansi():\n error_formatter = AnsiFormatter(style_set)\n else:\n error_formatter = PlainFormatter(style_set)\n\n io = self.io_class(\n Input(input_stream),\n Output(output_stream, output_formatter),\n Output(error_stream, error_formatter),\n )\n\n resolved_command = application.resolve_command(args)\n # If the current command is the run one, skip option\n # check and interpret them as part of the executed command\n if resolved_command.command.name == \"run\":\n return io\n\n if args.has_option_token(\"--no-ansi\"):\n formatter = PlainFormatter(style_set)\n io.output.set_formatter(formatter)\n io.error_output.set_formatter(formatter)\n elif args.has_option_token(\"--ansi\"):\n formatter = AnsiFormatter(style_set, True)\n io.output.set_formatter(formatter)\n io.error_output.set_formatter(formatter)\n\n if args.has_option_token(\"-vvv\") or self.is_debug():\n io.set_verbosity(DEBUG)\n elif args.has_option_token(\"-vv\"):\n io.set_verbosity(VERY_VERBOSE)\n elif args.has_option_token(\"-v\"):\n io.set_verbosity(VERBOSE)\n\n if args.has_option_token(\"--quiet\") or args.has_option_token(\"-q\"):\n io.set_quiet(True)\n\n if args.has_option_token(\"--no-interaction\") or args.has_option_token(\"-n\"):\n io.set_interactive(False)\n\n return io\n", "path": "poetry/console/config/application_config.py"}], "after_files": [{"content": "import logging\n\nfrom typing import Any\n\nfrom cleo.config import ApplicationConfig as BaseApplicationConfig\nfrom clikit.api.application.application import Application\nfrom clikit.api.args.raw_args import RawArgs\nfrom clikit.api.event import PRE_HANDLE\nfrom clikit.api.event import PreHandleEvent\nfrom clikit.api.event import PreResolveEvent\nfrom clikit.api.event.event_dispatcher import EventDispatcher\nfrom clikit.api.exceptions import CliKitException\nfrom clikit.api.formatter import Style\nfrom clikit.api.io import Input\nfrom clikit.api.io import InputStream\nfrom clikit.api.io import Output\nfrom clikit.api.io import 
OutputStream\nfrom clikit.api.io.flags import DEBUG\nfrom clikit.api.io.flags import VERBOSE\nfrom clikit.api.io.flags import VERY_VERBOSE\nfrom clikit.api.io.io import IO\nfrom clikit.formatter import AnsiFormatter\nfrom clikit.formatter import PlainFormatter\nfrom clikit.io.input_stream import StandardInputStream\nfrom clikit.io.output_stream import ErrorOutputStream\nfrom clikit.io.output_stream import StandardOutputStream\n\nfrom poetry.console.commands.command import Command\nfrom poetry.console.commands.env_command import EnvCommand\nfrom poetry.console.logging.io_formatter import IOFormatter\nfrom poetry.console.logging.io_handler import IOHandler\n\n\nclass ApplicationConfig(BaseApplicationConfig):\n def configure(self):\n super(ApplicationConfig, self).configure()\n\n self.add_style(Style(\"c1\").fg(\"cyan\"))\n self.add_style(Style(\"info\").fg(\"blue\"))\n self.add_style(Style(\"comment\").fg(\"green\"))\n self.add_style(Style(\"error\").fg(\"red\").bold())\n self.add_style(Style(\"warning\").fg(\"yellow\"))\n self.add_style(Style(\"debug\").fg(\"black\").bold())\n\n self.add_event_listener(PRE_HANDLE, self.register_command_loggers)\n self.add_event_listener(PRE_HANDLE, self.set_env)\n\n def register_command_loggers(\n self, event, event_name, _\n ): # type: (PreHandleEvent, str, Any) -> None\n command = event.command.config.handler\n if not isinstance(command, Command):\n return\n\n io = event.io\n\n loggers = [\"poetry.packages.package\", \"poetry.utils.password_manager\"]\n\n loggers += command.loggers\n\n handler = IOHandler(io)\n handler.setFormatter(IOFormatter())\n\n for logger in loggers:\n logger = logging.getLogger(logger)\n\n logger.handlers = [handler]\n logger.propagate = False\n\n level = logging.WARNING\n if io.is_debug():\n level = logging.DEBUG\n elif io.is_very_verbose() or io.is_verbose():\n level = logging.INFO\n\n logger.setLevel(level)\n\n def set_env(self, event, event_name, _): # type: (PreHandleEvent, str, Any) -> None\n from poetry.utils.env import EnvManager\n\n command = event.command.config.handler # type: EnvCommand\n if not isinstance(command, EnvCommand):\n return\n\n io = event.io\n poetry = command.poetry\n\n env_manager = EnvManager(poetry)\n env = env_manager.create_venv(io)\n\n if env.is_venv() and io.is_verbose():\n io.write_line(\"Using virtualenv: <comment>{}</>\".format(env.path))\n\n command.set_env(env)\n\n def resolve_help_command(\n self, event, event_name, dispatcher\n ): # type: (PreResolveEvent, str, EventDispatcher) -> None\n args = event.raw_args\n application = event.application\n\n if args.has_option_token(\"-h\") or args.has_option_token(\"--help\"):\n from clikit.api.resolver import ResolvedCommand\n\n try:\n resolved_command = self.command_resolver.resolve(args, application)\n except CliKitException:\n # We weren't able to resolve the command,\n # due to a parse error most likely,\n # so we fall back on the default behavior\n return super(ApplicationConfig, self).resolve_help_command(\n event, event_name, dispatcher\n )\n\n # If the current command is the run one, skip option\n # check and interpret them as part of the executed command\n if resolved_command.command.name == \"run\":\n event.set_resolved_command(resolved_command)\n\n return event.stop_propagation()\n\n command = application.get_command(\"help\")\n\n # Enable lenient parsing\n parsed_args = command.parse(args, True)\n\n event.set_resolved_command(ResolvedCommand(command, parsed_args))\n event.stop_propagation()\n\n def create_io(\n self,\n application,\n 
args,\n input_stream=None,\n output_stream=None,\n error_stream=None,\n ): # type: (Application, RawArgs, InputStream, OutputStream, OutputStream) -> IO\n if input_stream is None:\n input_stream = StandardInputStream()\n\n if output_stream is None:\n output_stream = StandardOutputStream()\n\n if error_stream is None:\n error_stream = ErrorOutputStream()\n\n style_set = application.config.style_set\n\n if output_stream.supports_ansi():\n output_formatter = AnsiFormatter(style_set)\n else:\n output_formatter = PlainFormatter(style_set)\n\n if error_stream.supports_ansi():\n error_formatter = AnsiFormatter(style_set)\n else:\n error_formatter = PlainFormatter(style_set)\n\n io = self.io_class(\n Input(input_stream),\n Output(output_stream, output_formatter),\n Output(error_stream, error_formatter),\n )\n\n resolved_command = application.resolve_command(args)\n # If the current command is the run one, skip option\n # check and interpret them as part of the executed command\n if resolved_command.command.name == \"run\":\n return io\n\n if args.has_option_token(\"--no-ansi\"):\n formatter = PlainFormatter(style_set)\n io.output.set_formatter(formatter)\n io.error_output.set_formatter(formatter)\n elif args.has_option_token(\"--ansi\"):\n formatter = AnsiFormatter(style_set, True)\n io.output.set_formatter(formatter)\n io.error_output.set_formatter(formatter)\n\n if args.has_option_token(\"-vvv\") or self.is_debug():\n io.set_verbosity(DEBUG)\n elif args.has_option_token(\"-vv\"):\n io.set_verbosity(VERY_VERBOSE)\n elif args.has_option_token(\"-v\"):\n io.set_verbosity(VERBOSE)\n\n if args.has_option_token(\"--quiet\") or args.has_option_token(\"-q\"):\n io.set_quiet(True)\n\n if args.has_option_token(\"--no-interaction\") or args.has_option_token(\"-n\"):\n io.set_interactive(False)\n\n return io\n", "path": "poetry/console/config/application_config.py"}]} | 2,429 | 308 |
gh_patches_debug_2706 | rasdani/github-patches | git_diff | fossasia__open-event-server-4302 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom-forms: Change data.type in custom-form
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here; instead, ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
**Current behavior:**
The type attribute is `custom_form`, which leads to a 409 error when making a request after #4300
**Expected behavior:**
The type attribute should be `custom-form`
@enigmaeth Can you please check?
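For reference, a rough sketch of the change this implies (simplified; the actual schema is in `app/api/custom_forms.py` below):

```python
from marshmallow_jsonapi.flask import Schema

class CustomFormSchema(Schema):
    class Meta:
        # JSON:API clients send the dasherized resource type, so declaring
        # the underscored value makes the posted type mismatch -> HTTP 409.
        type_ = 'custom-form'  # was 'custom_form'
```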
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/custom_forms.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
2 from marshmallow_jsonapi.flask import Schema, Relationship
3 from marshmallow_jsonapi import fields
4 import marshmallow.validate as validate
5 from app.api.helpers.permissions import jwt_required
6 from flask_rest_jsonapi.exceptions import ObjectNotFound
7
8 from app.api.bootstrap import api
9 from app.api.helpers.utilities import dasherize
10 from app.models import db
11 from app.models.custom_form import CustomForms
12 from app.models.event import Event
13 from app.api.helpers.db import safe_query
14 from app.api.helpers.utilities import require_relationship
15 from app.api.helpers.permission_manager import has_access
16 from app.api.helpers.query import event_query
17
18
19 class CustomFormSchema(Schema):
20 """
21 API Schema for Custom Forms database model
22 """
23 class Meta:
24 """
25 Meta class for CustomForm Schema
26 """
27 type_ = 'custom_form'
28 self_view = 'v1.custom_form_detail'
29 self_view_kwargs = {'id': '<id>'}
30 inflect = dasherize
31
32 id = fields.Integer(dump_only=True)
33 field_identifier = fields.Str(required=True)
34 form = fields.Str(required=True)
35 type = fields.Str(default="text", validate=validate.OneOf(
36 choices=["text", "checkbox", "select", "file", "image"]))
37 is_required = fields.Boolean(default=False)
38 is_included = fields.Boolean(default=False)
39 is_fixed = fields.Boolean(default=False)
40 event = Relationship(attribute='event',
41 self_view='v1.custom_form_event',
42 self_view_kwargs={'id': '<id>'},
43 related_view='v1.event_detail',
44 related_view_kwargs={'custom_form_id': '<id>'},
45 schema='EventSchema',
46 type_='event')
47
48
49 class CustomFormListPost(ResourceList):
50 """
51 Create and List Custom Forms
52 """
53
54 def before_post(self, args, kwargs, data):
55 """
56 method to check for required relationship with event
57 :param args:
58 :param kwargs:
59 :param data:
60 :return:
61 """
62 require_relationship(['event'], data)
63 if not has_access('is_coorganizer', event_id=data['event']):
64 raise ObjectNotFound({'parameter': 'event_id'},
65 "Event: {} not found".format(data['event_id']))
66
67 schema = CustomFormSchema
68 methods = ['POST', ]
69 data_layer = {'session': db.session,
70 'model': CustomForms
71 }
72
73
74 class CustomFormList(ResourceList):
75 """
76 Create and List Custom Forms
77 """
78 def query(self, view_kwargs):
79 """
80 query method for different view_kwargs
81 :param view_kwargs:
82 :return:
83 """
84 query_ = self.session.query(CustomForms)
85 query_ = event_query(self, query_, view_kwargs)
86 return query_
87
88 view_kwargs = True
89 decorators = (jwt_required, )
90 methods = ['GET', ]
91 schema = CustomFormSchema
92 data_layer = {'session': db.session,
93 'model': CustomForms,
94 'methods': {
95 'query': query
96 }}
97
98
99 class CustomFormDetail(ResourceDetail):
100 """
101 CustomForm Resource
102 """
103
104 def before_get_object(self, view_kwargs):
105 """
106 before get method
107 :param view_kwargs:
108 :return:
109 """
110 event = None
111 if view_kwargs.get('event_id'):
112 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')
113 elif view_kwargs.get('event_identifier'):
114 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')
115
116 if event:
117 custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')
118 view_kwargs['id'] = custom_form.id
119
120 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
121 fetch_as="event_id", model=CustomForms, methods="PATCH,DELETE"), )
122 schema = CustomFormSchema
123 data_layer = {'session': db.session,
124 'model': CustomForms}
125
126
127 class CustomFormRelationshipRequired(ResourceRelationship):
128 """
129 CustomForm Relationship (Required)
130 """
131 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
132 fetch_as="event_id", model=CustomForms, methods="PATCH"),)
133 methods = ['GET', 'PATCH']
134 schema = CustomFormSchema
135 data_layer = {'session': db.session,
136 'model': CustomForms}
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/custom_forms.py b/app/api/custom_forms.py
--- a/app/api/custom_forms.py
+++ b/app/api/custom_forms.py
@@ -24,7 +24,7 @@
"""
Meta class for CustomForm Schema
"""
- type_ = 'custom_form'
+ type_ = 'custom-form'
self_view = 'v1.custom_form_detail'
self_view_kwargs = {'id': '<id>'}
inflect = dasherize
| {"golden_diff": "diff --git a/app/api/custom_forms.py b/app/api/custom_forms.py\n--- a/app/api/custom_forms.py\n+++ b/app/api/custom_forms.py\n@@ -24,7 +24,7 @@\n \"\"\"\n Meta class for CustomForm Schema\n \"\"\"\n- type_ = 'custom_form'\n+ type_ = 'custom-form'\n self_view = 'v1.custom_form_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n", "issue": "Custom-forms: Change data.type in custom-form\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n**Current behavior:**\r\nThe type attribute is `custom_form` which leads to error 409 while making a request after #4300 \r\n\r\n**Expected behavior:**\r\nThe type attribute should be `custom-form` \r\n\r\n@enigmaeth Can you please check?\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nimport marshmallow.validate as validate\nfrom app.api.helpers.permissions import jwt_required\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\n\n\nclass CustomFormSchema(Schema):\n \"\"\"\n API Schema for Custom Forms database model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for CustomForm Schema\n \"\"\"\n type_ = 'custom_form'\n self_view = 'v1.custom_form_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Integer(dump_only=True)\n field_identifier = fields.Str(required=True)\n form = fields.Str(required=True)\n type = fields.Str(default=\"text\", validate=validate.OneOf(\n choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\"]))\n is_required = fields.Boolean(default=False)\n is_included = fields.Boolean(default=False)\n is_fixed = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.custom_form_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'custom_form_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass CustomFormListPost(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to check for required relationship with event\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n schema = CustomFormSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': CustomForms\n }\n\n\nclass CustomFormList(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(CustomForms)\n query_ = event_query(self, query_, view_kwargs)\n return query_\n\n 
view_kwargs = True\n decorators = (jwt_required, )\n methods = ['GET', ]\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms,\n 'methods': {\n 'query': query\n }}\n\n\nclass CustomFormDetail(ResourceDetail):\n \"\"\"\n CustomForm Resource\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method\n :param view_kwargs:\n :return:\n \"\"\"\n event = None\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n\n if event:\n custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')\n view_kwargs['id'] = custom_form.id\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH,DELETE\"), )\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n\n\nclass CustomFormRelationshipRequired(ResourceRelationship):\n \"\"\"\n CustomForm Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n", "path": "app/api/custom_forms.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nimport marshmallow.validate as validate\nfrom app.api.helpers.permissions import jwt_required\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.utilities import dasherize\nfrom app.models import db\nfrom app.models.custom_form import CustomForms\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\n\n\nclass CustomFormSchema(Schema):\n \"\"\"\n API Schema for Custom Forms database model\n \"\"\"\n class Meta:\n \"\"\"\n Meta class for CustomForm Schema\n \"\"\"\n type_ = 'custom-form'\n self_view = 'v1.custom_form_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Integer(dump_only=True)\n field_identifier = fields.Str(required=True)\n form = fields.Str(required=True)\n type = fields.Str(default=\"text\", validate=validate.OneOf(\n choices=[\"text\", \"checkbox\", \"select\", \"file\", \"image\"]))\n is_required = fields.Boolean(default=False)\n is_included = fields.Boolean(default=False)\n is_fixed = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.custom_form_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'custom_form_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass CustomFormListPost(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n\n def before_post(self, args, kwargs, data):\n \"\"\"\n method to check for required relationship with event\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise 
ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event_id']))\n\n schema = CustomFormSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': CustomForms\n }\n\n\nclass CustomFormList(ResourceList):\n \"\"\"\n Create and List Custom Forms\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n query method for different view_kwargs\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(CustomForms)\n query_ = event_query(self, query_, view_kwargs)\n return query_\n\n view_kwargs = True\n decorators = (jwt_required, )\n methods = ['GET', ]\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms,\n 'methods': {\n 'query': query\n }}\n\n\nclass CustomFormDetail(ResourceDetail):\n \"\"\"\n CustomForm Resource\n \"\"\"\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get method\n :param view_kwargs:\n :return:\n \"\"\"\n event = None\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n\n if event:\n custom_form = safe_query(self, CustomForms, 'event_id', event.id, 'event_id')\n view_kwargs['id'] = custom_form.id\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH,DELETE\"), )\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n\n\nclass CustomFormRelationshipRequired(ResourceRelationship):\n \"\"\"\n CustomForm Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=CustomForms, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = CustomFormSchema\n data_layer = {'session': db.session,\n 'model': CustomForms}\n", "path": "app/api/custom_forms.py"}]} | 1,652 | 105 |
gh_patches_debug_30587 | rasdani/github-patches | git_diff | networkx__networkx-2618 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`networkx.version` shadows any other module named `version` if imported first
Steps to reproduce:
```
$ pip freeze | grep networkx
networkx==1.11
$ touch version.py
$ python -c 'import version; print(version)'
<module 'version' from '/Users/ben/scratch/version.py'>
$ python -c 'import networkx; import version; print(version)'
<module 'version' from '/Users/ben/.virtualenvs/personal/lib/python3.6/site-packages/networkx/version.py'>
```
Reading the code, it looks like the `release` module adds the networkx package directory to `sys.path`, imports `version`, and then removes the path entry again?
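For illustration, a minimal sketch of that mechanism (paraphrasing `release.py`, not quoting it); the key point is that the imported module stays cached in `sys.modules` even after the path entry is removed:

```python
import os
import sys

basedir = os.path.abspath(os.path.split(__file__)[0])  # the networkx package directory

sys.path.insert(0, basedir)   # package dir temporarily becomes the first import location
from version import version   # loads networkx/version.py and caches it as sys.modules["version"]
del sys.path[0]               # the path entry is removed again...

# ...but the cached module object remains, so a later `import version` anywhere
# in the same process resolves to networkx's version.py instead of a local file.
```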
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/release.py`
Content:
```
1 """Release data for NetworkX.
2
3 When NetworkX is imported a number of steps are followed to determine
4 the version information.
5
6 1) If the release is not a development release (dev=False), then version
7 information is read from version.py, a file containing statically
8 defined version information. This file should exist on every
9 downloadable release of NetworkX since setup.py creates it during
10 packaging/installation. However, version.py might not exist if one
11 is running NetworkX from the mercurial repository. In the event that
12 version.py does not exist, then no vcs information will be available.
13
14 2) If the release is a development release, then version information
15 is read dynamically, when possible. If no dynamic information can be
16 read, then an attempt is made to read the information from version.py.
17 If version.py does not exist, then no vcs information will be available.
18
19 Clarification:
20 version.py is created only by setup.py
21
22 When setup.py creates version.py, it does so before packaging/installation.
23 So the created file is included in the source distribution. When a user
24 downloads a tar.gz file and extracts the files, the files will not be in a
25 live version control repository. So when the user runs setup.py to install
26 NetworkX, we must make sure write_versionfile() does not overwrite the
27 revision information contained in the version.py that was included in the
28 tar.gz file. This is why write_versionfile() includes an early escape.
29
30 """
31
32 # Copyright (C) 2004-2017 by
33 # Aric Hagberg <[email protected]>
34 # Dan Schult <[email protected]>
35 # Pieter Swart <[email protected]>
36 # All rights reserved.
37 # BSD license.
38
39 from __future__ import absolute_import
40
41 import os
42 import sys
43 import time
44 import datetime
45
46 basedir = os.path.abspath(os.path.split(__file__)[0])
47
48
49 def write_versionfile():
50 """Creates a static file containing version information."""
51 versionfile = os.path.join(basedir, 'version.py')
52
53 text = '''"""
54 Version information for NetworkX, created during installation.
55
56 Do not add this file to the repository.
57
58 """
59
60 import datetime
61
62 version = %(version)r
63 date = %(date)r
64
65 # Was NetworkX built from a development version? If so, remember that the major
66 # and minor versions reference the "target" (rather than "current") release.
67 dev = %(dev)r
68
69 # Format: (name, major, min, revision)
70 version_info = %(version_info)r
71
72 # Format: a 'datetime.datetime' instance
73 date_info = %(date_info)r
74
75 # Format: (vcs, vcs_tuple)
76 vcs_info = %(vcs_info)r
77
78 '''
79
80 # Try to update all information
81 date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
82
83 def writefile():
84 fh = open(versionfile, 'w')
85 subs = {
86 'dev': dev,
87 'version': version,
88 'version_info': version_info,
89 'date': date,
90 'date_info': date_info,
91 'vcs_info': vcs_info
92 }
93 fh.write(text % subs)
94 fh.close()
95
96 if vcs_info[0] == 'mercurial':
97 # Then, we want to update version.py.
98 writefile()
99 else:
100 if os.path.isfile(versionfile):
101 # This is *good*, and the most likely place users will be when
102 # running setup.py. We do not want to overwrite version.py.
103 # Grab the version so that setup can use it.
104 sys.path.insert(0, basedir)
105 from version import version
106 del sys.path[0]
107 else:
108 # This is *bad*. It means the user might have a tarball that
109 # does not include version.py. Let this error raise so we can
110 # fix the tarball.
111 ##raise Exception('version.py not found!')
112
113 # We no longer require that prepared tarballs include a version.py
114 # So we use the possibly trunctated value from get_info()
115 # Then we write a new file.
116 writefile()
117
118 return version
119
120
121 def get_revision():
122 """Returns revision and vcs information, dynamically obtained."""
123 vcs, revision, tag = None, None, None
124
125 gitdir = os.path.join(basedir, '..', '.git')
126
127 if os.path.isdir(gitdir):
128 vcs = 'git'
129 # For now, we are not bothering with revision and tag.
130
131 vcs_info = (vcs, (revision, tag))
132
133 return revision, vcs_info
134
135
136 def get_info(dynamic=True):
137 # Date information
138 date_info = datetime.datetime.now()
139 date = time.asctime(date_info.timetuple())
140
141 revision, version, version_info, vcs_info = None, None, None, None
142
143 import_failed = False
144 dynamic_failed = False
145
146 if dynamic:
147 revision, vcs_info = get_revision()
148 if revision is None:
149 dynamic_failed = True
150
151 if dynamic_failed or not dynamic:
152 # This is where most final releases of NetworkX will be.
153 # All info should come from version.py. If it does not exist, then
154 # no vcs information will be provided.
155 sys.path.insert(0, basedir)
156 try:
157 from version import date, date_info, version, version_info, vcs_info
158 except ImportError:
159 import_failed = True
160 vcs_info = (None, (None, None))
161 else:
162 revision = vcs_info[1][0]
163 del sys.path[0]
164
165 if import_failed or (dynamic and not dynamic_failed):
166 # We are here if:
167 # we failed to determine static versioning info, or
168 # we successfully obtained dynamic revision info
169 version = ''.join([str(major), '.', str(minor)])
170 if dev:
171 version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
172 version_info = (name, major, minor, revision)
173
174 return date, date_info, version, version_info, vcs_info
175
176
177 # Version information
178 name = 'networkx'
179 major = "2"
180 minor = "0"
181
182
183 # Declare current release as a development release.
184 # Change to False before tagging a release; then change back.
185 dev = True
186
187
188 description = "Python package for creating and manipulating graphs and networks"
189
190 long_description = \
191 """
192 NetworkX is a Python package for the creation, manipulation, and
193 study of the structure, dynamics, and functions of complex networks.
194
195 """
196 license = 'BSD'
197 authors = {'Hagberg': ('Aric Hagberg', '[email protected]'),
198 'Schult': ('Dan Schult', '[email protected]'),
199 'Swart': ('Pieter Swart', '[email protected]')
200 }
201 maintainer = "NetworkX Developers"
202 maintainer_email = "[email protected]"
203 url = 'http://networkx.github.io/'
204 download_url = 'https://pypi.python.org/pypi/networkx/'
205 platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
206 keywords = ['Networks', 'Graph Theory', 'Mathematics',
207 'network', 'graph', 'discrete mathematics', 'math']
208 classifiers = [
209 'Development Status :: 5 - Production/Stable',
210 'Intended Audience :: Developers',
211 'Intended Audience :: Science/Research',
212 'License :: OSI Approved :: BSD License',
213 'Operating System :: OS Independent',
214 'Programming Language :: Python :: 2',
215 'Programming Language :: Python :: 2.7',
216 'Programming Language :: Python :: 3',
217 'Programming Language :: Python :: 3.4',
218 'Programming Language :: Python :: 3.5',
219 'Programming Language :: Python :: 3.6',
220 'Topic :: Software Development :: Libraries :: Python Modules',
221 'Topic :: Scientific/Engineering :: Bio-Informatics',
222 'Topic :: Scientific/Engineering :: Information Analysis',
223 'Topic :: Scientific/Engineering :: Mathematics',
224 'Topic :: Scientific/Engineering :: Physics']
225
226 date, date_info, version, version_info, vcs_info = get_info()
227
228 if __name__ == '__main__':
229 # Write versionfile for nightly snapshots.
230 write_versionfile()
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/release.py b/networkx/release.py
--- a/networkx/release.py
+++ b/networkx/release.py
@@ -101,9 +101,9 @@
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
- sys.path.insert(0, basedir)
+ #sys.path.insert(0, basedir)
from version import version
- del sys.path[0]
+ #del sys.path[0]
else:
# This is *bad*. It means the user might have a tarball that
# does not include version.py. Let this error raise so we can
@@ -152,7 +152,7 @@
# This is where most final releases of NetworkX will be.
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
- sys.path.insert(0, basedir)
+ #sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info, vcs_info
except ImportError:
@@ -160,7 +160,7 @@
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
- del sys.path[0]
+ #del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
| {"golden_diff": "diff --git a/networkx/release.py b/networkx/release.py\n--- a/networkx/release.py\n+++ b/networkx/release.py\n@@ -101,9 +101,9 @@\n # This is *good*, and the most likely place users will be when\n # running setup.py. We do not want to overwrite version.py.\n # Grab the version so that setup can use it.\n- sys.path.insert(0, basedir)\n+ #sys.path.insert(0, basedir)\n from version import version\n- del sys.path[0]\n+ #del sys.path[0]\n else:\n # This is *bad*. It means the user might have a tarball that\n # does not include version.py. Let this error raise so we can\n@@ -152,7 +152,7 @@\n # This is where most final releases of NetworkX will be.\n # All info should come from version.py. If it does not exist, then\n # no vcs information will be provided.\n- sys.path.insert(0, basedir)\n+ #sys.path.insert(0, basedir)\n try:\n from version import date, date_info, version, version_info, vcs_info\n except ImportError:\n@@ -160,7 +160,7 @@\n vcs_info = (None, (None, None))\n else:\n revision = vcs_info[1][0]\n- del sys.path[0]\n+ #del sys.path[0]\n \n if import_failed or (dynamic and not dynamic_failed):\n # We are here if:\n", "issue": "`networkx.version` shadows any other module named `version` if imported first\nSteps to reproduce:\r\n\r\n```\r\n$ pip freeze | grep networkx\r\nnetworkx==1.11\r\n$ touch version.py\r\n$ python -c 'import version; print(version)'\r\n<module 'version' from '/Users/ben/scratch/version.py'>\r\n$ python -c 'import networkx; import version; print(version)'\r\n<module 'version' from '/Users/ben/.virtualenvs/personal/lib/python3.6/site-packages/networkx/version.py'>\r\n```\r\n\r\nReading the code, it looks like the `release` module is adding the networkx package to `sys.path`, importing version and deleting it again?\n", "before_files": [{"content": "\"\"\"Release data for NetworkX.\n\nWhen NetworkX is imported a number of steps are followed to determine\nthe version information.\n\n 1) If the release is not a development release (dev=False), then version\n information is read from version.py, a file containing statically\n defined version information. This file should exist on every\n downloadable release of NetworkX since setup.py creates it during\n packaging/installation. However, version.py might not exist if one\n is running NetworkX from the mercurial repository. In the event that\n version.py does not exist, then no vcs information will be available.\n\n 2) If the release is a development release, then version information\n is read dynamically, when possible. If no dynamic information can be\n read, then an attempt is made to read the information from version.py.\n If version.py does not exist, then no vcs information will be available.\n\nClarification:\n version.py is created only by setup.py\n\nWhen setup.py creates version.py, it does so before packaging/installation.\nSo the created file is included in the source distribution. When a user\ndownloads a tar.gz file and extracts the files, the files will not be in a\nlive version control repository. So when the user runs setup.py to install\nNetworkX, we must make sure write_versionfile() does not overwrite the\nrevision information contained in the version.py that was included in the\ntar.gz file. 
This is why write_versionfile() includes an early escape.\n\n\"\"\"\n\n# Copyright (C) 2004-2017 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport time\nimport datetime\n\nbasedir = os.path.abspath(os.path.split(__file__)[0])\n\n\ndef write_versionfile():\n \"\"\"Creates a static file containing version information.\"\"\"\n versionfile = os.path.join(basedir, 'version.py')\n\n text = '''\"\"\"\nVersion information for NetworkX, created during installation.\n\nDo not add this file to the repository.\n\n\"\"\"\n\nimport datetime\n\nversion = %(version)r\ndate = %(date)r\n\n# Was NetworkX built from a development version? If so, remember that the major\n# and minor versions reference the \"target\" (rather than \"current\") release.\ndev = %(dev)r\n\n# Format: (name, major, min, revision)\nversion_info = %(version_info)r\n\n# Format: a 'datetime.datetime' instance\ndate_info = %(date_info)r\n\n# Format: (vcs, vcs_tuple)\nvcs_info = %(vcs_info)r\n\n'''\n\n # Try to update all information\n date, date_info, version, version_info, vcs_info = get_info(dynamic=True)\n\n def writefile():\n fh = open(versionfile, 'w')\n subs = {\n 'dev': dev,\n 'version': version,\n 'version_info': version_info,\n 'date': date,\n 'date_info': date_info,\n 'vcs_info': vcs_info\n }\n fh.write(text % subs)\n fh.close()\n\n if vcs_info[0] == 'mercurial':\n # Then, we want to update version.py.\n writefile()\n else:\n if os.path.isfile(versionfile):\n # This is *good*, and the most likely place users will be when\n # running setup.py. We do not want to overwrite version.py.\n # Grab the version so that setup can use it.\n sys.path.insert(0, basedir)\n from version import version\n del sys.path[0]\n else:\n # This is *bad*. It means the user might have a tarball that\n # does not include version.py. Let this error raise so we can\n # fix the tarball.\n ##raise Exception('version.py not found!')\n\n # We no longer require that prepared tarballs include a version.py\n # So we use the possibly trunctated value from get_info()\n # Then we write a new file.\n writefile()\n\n return version\n\n\ndef get_revision():\n \"\"\"Returns revision and vcs information, dynamically obtained.\"\"\"\n vcs, revision, tag = None, None, None\n\n gitdir = os.path.join(basedir, '..', '.git')\n\n if os.path.isdir(gitdir):\n vcs = 'git'\n # For now, we are not bothering with revision and tag.\n\n vcs_info = (vcs, (revision, tag))\n\n return revision, vcs_info\n\n\ndef get_info(dynamic=True):\n # Date information\n date_info = datetime.datetime.now()\n date = time.asctime(date_info.timetuple())\n\n revision, version, version_info, vcs_info = None, None, None, None\n\n import_failed = False\n dynamic_failed = False\n\n if dynamic:\n revision, vcs_info = get_revision()\n if revision is None:\n dynamic_failed = True\n\n if dynamic_failed or not dynamic:\n # This is where most final releases of NetworkX will be.\n # All info should come from version.py. 
If it does not exist, then\n # no vcs information will be provided.\n sys.path.insert(0, basedir)\n try:\n from version import date, date_info, version, version_info, vcs_info\n except ImportError:\n import_failed = True\n vcs_info = (None, (None, None))\n else:\n revision = vcs_info[1][0]\n del sys.path[0]\n\n if import_failed or (dynamic and not dynamic_failed):\n # We are here if:\n # we failed to determine static versioning info, or\n # we successfully obtained dynamic revision info\n version = ''.join([str(major), '.', str(minor)])\n if dev:\n version += '.dev_' + date_info.strftime(\"%Y%m%d%H%M%S\")\n version_info = (name, major, minor, revision)\n\n return date, date_info, version, version_info, vcs_info\n\n\n# Version information\nname = 'networkx'\nmajor = \"2\"\nminor = \"0\"\n\n\n# Declare current release as a development release.\n# Change to False before tagging a release; then change back.\ndev = True\n\n\ndescription = \"Python package for creating and manipulating graphs and networks\"\n\nlong_description = \\\n \"\"\"\nNetworkX is a Python package for the creation, manipulation, and\nstudy of the structure, dynamics, and functions of complex networks.\n\n\"\"\"\nlicense = 'BSD'\nauthors = {'Hagberg': ('Aric Hagberg', '[email protected]'),\n 'Schult': ('Dan Schult', '[email protected]'),\n 'Swart': ('Pieter Swart', '[email protected]')\n }\nmaintainer = \"NetworkX Developers\"\nmaintainer_email = \"[email protected]\"\nurl = 'http://networkx.github.io/'\ndownload_url = 'https://pypi.python.org/pypi/networkx/'\nplatforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']\nkeywords = ['Networks', 'Graph Theory', 'Mathematics',\n 'network', 'graph', 'discrete mathematics', 'math']\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics']\n\ndate, date_info, version, version_info, vcs_info = get_info()\n\nif __name__ == '__main__':\n # Write versionfile for nightly snapshots.\n write_versionfile()\n", "path": "networkx/release.py"}], "after_files": [{"content": "\"\"\"Release data for NetworkX.\n\nWhen NetworkX is imported a number of steps are followed to determine\nthe version information.\n\n 1) If the release is not a development release (dev=False), then version\n information is read from version.py, a file containing statically\n defined version information. This file should exist on every\n downloadable release of NetworkX since setup.py creates it during\n packaging/installation. However, version.py might not exist if one\n is running NetworkX from the mercurial repository. In the event that\n version.py does not exist, then no vcs information will be available.\n\n 2) If the release is a development release, then version information\n is read dynamically, when possible. 
If no dynamic information can be\n read, then an attempt is made to read the information from version.py.\n If version.py does not exist, then no vcs information will be available.\n\nClarification:\n version.py is created only by setup.py\n\nWhen setup.py creates version.py, it does so before packaging/installation.\nSo the created file is included in the source distribution. When a user\ndownloads a tar.gz file and extracts the files, the files will not be in a\nlive version control repository. So when the user runs setup.py to install\nNetworkX, we must make sure write_versionfile() does not overwrite the\nrevision information contained in the version.py that was included in the\ntar.gz file. This is why write_versionfile() includes an early escape.\n\n\"\"\"\n\n# Copyright (C) 2004-2017 by\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport time\nimport datetime\n\nbasedir = os.path.abspath(os.path.split(__file__)[0])\n\n\ndef write_versionfile():\n \"\"\"Creates a static file containing version information.\"\"\"\n versionfile = os.path.join(basedir, 'version.py')\n\n text = '''\"\"\"\nVersion information for NetworkX, created during installation.\n\nDo not add this file to the repository.\n\n\"\"\"\n\nimport datetime\n\nversion = %(version)r\ndate = %(date)r\n\n# Was NetworkX built from a development version? If so, remember that the major\n# and minor versions reference the \"target\" (rather than \"current\") release.\ndev = %(dev)r\n\n# Format: (name, major, min, revision)\nversion_info = %(version_info)r\n\n# Format: a 'datetime.datetime' instance\ndate_info = %(date_info)r\n\n# Format: (vcs, vcs_tuple)\nvcs_info = %(vcs_info)r\n\n'''\n\n # Try to update all information\n date, date_info, version, version_info, vcs_info = get_info(dynamic=True)\n\n def writefile():\n fh = open(versionfile, 'w')\n subs = {\n 'dev': dev,\n 'version': version,\n 'version_info': version_info,\n 'date': date,\n 'date_info': date_info,\n 'vcs_info': vcs_info\n }\n fh.write(text % subs)\n fh.close()\n\n if vcs_info[0] == 'mercurial':\n # Then, we want to update version.py.\n writefile()\n else:\n if os.path.isfile(versionfile):\n # This is *good*, and the most likely place users will be when\n # running setup.py. We do not want to overwrite version.py.\n # Grab the version so that setup can use it.\n #sys.path.insert(0, basedir)\n from version import version\n #del sys.path[0]\n else:\n # This is *bad*. It means the user might have a tarball that\n # does not include version.py. 
Let this error raise so we can\n # fix the tarball.\n ##raise Exception('version.py not found!')\n\n # We no longer require that prepared tarballs include a version.py\n # So we use the possibly trunctated value from get_info()\n # Then we write a new file.\n writefile()\n\n return version\n\n\ndef get_revision():\n \"\"\"Returns revision and vcs information, dynamically obtained.\"\"\"\n vcs, revision, tag = None, None, None\n\n gitdir = os.path.join(basedir, '..', '.git')\n\n if os.path.isdir(gitdir):\n vcs = 'git'\n # For now, we are not bothering with revision and tag.\n\n vcs_info = (vcs, (revision, tag))\n\n return revision, vcs_info\n\n\ndef get_info(dynamic=True):\n # Date information\n date_info = datetime.datetime.now()\n date = time.asctime(date_info.timetuple())\n\n revision, version, version_info, vcs_info = None, None, None, None\n\n import_failed = False\n dynamic_failed = False\n\n if dynamic:\n revision, vcs_info = get_revision()\n if revision is None:\n dynamic_failed = True\n\n if dynamic_failed or not dynamic:\n # This is where most final releases of NetworkX will be.\n # All info should come from version.py. If it does not exist, then\n # no vcs information will be provided.\n #sys.path.insert(0, basedir)\n try:\n from version import date, date_info, version, version_info, vcs_info\n except ImportError:\n import_failed = True\n vcs_info = (None, (None, None))\n else:\n revision = vcs_info[1][0]\n #del sys.path[0]\n\n if import_failed or (dynamic and not dynamic_failed):\n # We are here if:\n # we failed to determine static versioning info, or\n # we successfully obtained dynamic revision info\n version = ''.join([str(major), '.', str(minor)])\n if dev:\n version += '.dev_' + date_info.strftime(\"%Y%m%d%H%M%S\")\n version_info = (name, major, minor, revision)\n\n return date, date_info, version, version_info, vcs_info\n\n\n# Version information\nname = 'networkx'\nmajor = \"2\"\nminor = \"0\"\n\n\n# Declare current release as a development release.\n# Change to False before tagging a release; then change back.\ndev = True\n\n\ndescription = \"Python package for creating and manipulating graphs and networks\"\n\nlong_description = \\\n \"\"\"\nNetworkX is a Python package for the creation, manipulation, and\nstudy of the structure, dynamics, and functions of complex networks.\n\n\"\"\"\nlicense = 'BSD'\nauthors = {'Hagberg': ('Aric Hagberg', '[email protected]'),\n 'Schult': ('Dan Schult', '[email protected]'),\n 'Swart': ('Pieter Swart', '[email protected]')\n }\nmaintainer = \"NetworkX Developers\"\nmaintainer_email = \"[email protected]\"\nurl = 'http://networkx.github.io/'\ndownload_url = 'https://pypi.python.org/pypi/networkx/'\nplatforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']\nkeywords = ['Networks', 'Graph Theory', 'Mathematics',\n 'network', 'graph', 'discrete mathematics', 'math']\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: 
Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics']\n\ndate, date_info, version, version_info, vcs_info = get_info()\n\nif __name__ == '__main__':\n # Write versionfile for nightly snapshots.\n write_versionfile()\n", "path": "networkx/release.py"}]} | 2,841 | 353 |
gh_patches_debug_37642 | rasdani/github-patches | git_diff | learningequality__kolibri-12059 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature Request: Add --manifest-only option to exportcontent
My understanding is that 0.16 will generate a channel manifest during
`kolibri manage exportcontent`
My request is that you add an option that will not do the export of content but only generate the manifest. This manifest could then be used on another remote install to import from network the same set of content.
```[tasklist]
### Tasks
- [ ] Add --manifest-only command line option to the exportcontent management command
- [ ] If this option is selected, generate the manifest, but skip copying any files (channel database files, and content files)
- [ ] Write tests to confirm the --manifest-only behaviour
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/content/management/commands/exportcontent.py`
Content:
```
1 import logging
2 import os
3
4 from django.core.management.base import CommandError
5
6 from ...utils import paths
7 from kolibri.core.content.errors import InvalidStorageFilenameError
8 from kolibri.core.content.models import ChannelMetadata
9 from kolibri.core.content.utils.content_manifest import ContentManifest
10 from kolibri.core.content.utils.import_export_content import get_content_nodes_data
11 from kolibri.core.content.utils.import_export_content import get_import_export_nodes
12 from kolibri.core.content.utils.paths import get_content_file_name
13 from kolibri.core.tasks.management.commands.base import AsyncCommand
14 from kolibri.core.tasks.utils import get_current_job
15 from kolibri.utils import file_transfer as transfer
16
17 logger = logging.getLogger(__name__)
18
19
20 class Command(AsyncCommand):
21 exported_size = 0
22 total_resources = 0
23
24 def add_arguments(self, parser):
25 node_ids_help_text = """
26 Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.
27 Make sure to call this near the end of the argument list.
28
29 e.g.
30
31 kolibri manage importcontent network <channel id> --node_ids <id1>,<id2>, [<ids>,...]
32 """
33 parser.add_argument(
34 "--node_ids",
35 "-n",
36 # Split the comma separated string we get, into a list of strings
37 type=lambda x: x.split(",") if x else [],
38 default=None,
39 required=False,
40 dest="node_ids",
41 help=node_ids_help_text,
42 )
43
44 exclude_node_ids_help_text = """
45 Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.
46 Make sure to call this near the end of the argument list.
47
48 e.g.
49
50 kolibri manage importcontent network <channel id> --exclude_node_ids <id1>,<id2>, [<ids>,...]
51 """
52 parser.add_argument(
53 "--exclude_node_ids",
54 type=lambda x: x.split(",") if x else [],
55 default=None,
56 required=False,
57 dest="exclude_node_ids",
58 help=exclude_node_ids_help_text,
59 )
60
61 parser.add_argument("channel_id", type=str)
62 parser.add_argument("destination", type=str)
63
64 def update_job_metadata(self, total_bytes_to_transfer, total_resource_count):
65 job = get_current_job()
66 if job:
67 job.extra_metadata["file_size"] = total_bytes_to_transfer
68 job.extra_metadata["total_resources"] = total_resource_count
69 job.save_meta()
70
71 def handle_async(self, *args, **options):
72 if paths.using_remote_storage():
73 raise CommandError("Cannot export files when using remote file storage")
74 channel_id = options["channel_id"]
75 data_dir = os.path.realpath(options["destination"])
76 node_ids = options["node_ids"]
77 exclude_node_ids = options["exclude_node_ids"]
78 logger.info(
79 "Exporting content for channel id {} to {}".format(channel_id, data_dir)
80 )
81
82 channel_metadata = ChannelMetadata.objects.get(id=channel_id)
83
84 nodes_queries_list = get_import_export_nodes(
85 channel_id, node_ids, exclude_node_ids, available=True
86 )
87
88 (total_resource_count, files, total_bytes_to_transfer) = get_content_nodes_data(
89 channel_id, nodes_queries_list, available=True
90 )
91
92 self.update_job_metadata(total_bytes_to_transfer, total_resource_count)
93
94 exported_files = []
95
96 with self.start_progress(
97 total=total_bytes_to_transfer
98 ) as overall_progress_update:
99 for f in files:
100
101 if self.is_cancelled():
102 break
103
104 dest = self.export_file(f, data_dir, overall_progress_update)
105 if dest:
106 exported_files.append(dest)
107
108 # Reraise any cancellation
109 self.check_for_cancel()
110
111 logger.info(
112 "Exporting manifest for channel id {} to {}".format(channel_id, data_dir)
113 )
114
115 manifest_path = os.path.join(data_dir, "content", "manifest.json")
116 content_manifest = ContentManifest()
117 content_manifest.read(manifest_path)
118 content_manifest.add_content_nodes(
119 channel_id, channel_metadata.version, nodes_queries_list
120 )
121 content_manifest.write(manifest_path)
122
123 def export_file(self, f, data_dir, overall_progress_update):
124 filename = get_content_file_name(f)
125 try:
126 srcpath = paths.get_content_storage_file_path(filename)
127 dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)
128 except InvalidStorageFilenameError:
129 # If any files have an invalid storage file name, don't export them.
130 overall_progress_update(f["file_size"])
131 return
132
133 # if the file already exists, add its size to our overall progress, and skip
134 if os.path.isfile(dest) and os.path.getsize(dest) == f["file_size"]:
135 overall_progress_update(f["file_size"])
136 return
137 copy = transfer.FileCopy(srcpath, dest, cancel_check=self.is_cancelled)
138 with copy, self.start_progress(
139 total=copy.transfer_size
140 ) as file_cp_progress_update:
141
142 def progress_update(length):
143 self.exported_size = self.exported_size + length
144 overall_progress_update(length)
145 file_cp_progress_update(length)
146
147 try:
148 copy.run(progress_update=progress_update)
149 except transfer.TransferCanceled:
150 job = get_current_job()
151 if job:
152 job.extra_metadata["file_size"] = self.exported_size
153 job.extra_metadata["total_resources"] = 0
154 job.save_meta()
155 return
156 return dest
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/content/management/commands/exportcontent.py b/kolibri/core/content/management/commands/exportcontent.py
--- a/kolibri/core/content/management/commands/exportcontent.py
+++ b/kolibri/core/content/management/commands/exportcontent.py
@@ -60,6 +60,12 @@
parser.add_argument("channel_id", type=str)
parser.add_argument("destination", type=str)
+ parser.add_argument(
+ "--manifest-only",
+ action="store_true",
+ default=False,
+ help="Generate only the manifest.json file",
+ )
def update_job_metadata(self, total_bytes_to_transfer, total_resource_count):
job = get_current_job()
@@ -75,9 +81,6 @@
data_dir = os.path.realpath(options["destination"])
node_ids = options["node_ids"]
exclude_node_ids = options["exclude_node_ids"]
- logger.info(
- "Exporting content for channel id {} to {}".format(channel_id, data_dir)
- )
channel_metadata = ChannelMetadata.objects.get(id=channel_id)
@@ -91,19 +94,11 @@
self.update_job_metadata(total_bytes_to_transfer, total_resource_count)
- exported_files = []
-
- with self.start_progress(
- total=total_bytes_to_transfer
- ) as overall_progress_update:
- for f in files:
-
- if self.is_cancelled():
- break
-
- dest = self.export_file(f, data_dir, overall_progress_update)
- if dest:
- exported_files.append(dest)
+ # dont copy files if we are only exporting the manifest
+ if not options["manifest_only"]:
+ self.copy_content_files(
+ channel_id, data_dir, files, total_bytes_to_transfer
+ )
# Reraise any cancellation
self.check_for_cancel()
@@ -120,6 +115,18 @@
)
content_manifest.write(manifest_path)
+ def copy_content_files(self, channel_id, data_dir, files, total_bytes_to_transfer):
+ logger.info(
+ "Exporting content for channel id {} to {}".format(channel_id, data_dir)
+ )
+ with self.start_progress(
+ total=total_bytes_to_transfer
+ ) as overall_progress_update:
+ for f in files:
+ if self.is_cancelled():
+ break
+ self.export_file(f, data_dir, overall_progress_update)
+
def export_file(self, f, data_dir, overall_progress_update):
filename = get_content_file_name(f)
try:
| {"golden_diff": "diff --git a/kolibri/core/content/management/commands/exportcontent.py b/kolibri/core/content/management/commands/exportcontent.py\n--- a/kolibri/core/content/management/commands/exportcontent.py\n+++ b/kolibri/core/content/management/commands/exportcontent.py\n@@ -60,6 +60,12 @@\n \n parser.add_argument(\"channel_id\", type=str)\n parser.add_argument(\"destination\", type=str)\n+ parser.add_argument(\n+ \"--manifest-only\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Generate only the manifest.json file\",\n+ )\n \n def update_job_metadata(self, total_bytes_to_transfer, total_resource_count):\n job = get_current_job()\n@@ -75,9 +81,6 @@\n data_dir = os.path.realpath(options[\"destination\"])\n node_ids = options[\"node_ids\"]\n exclude_node_ids = options[\"exclude_node_ids\"]\n- logger.info(\n- \"Exporting content for channel id {} to {}\".format(channel_id, data_dir)\n- )\n \n channel_metadata = ChannelMetadata.objects.get(id=channel_id)\n \n@@ -91,19 +94,11 @@\n \n self.update_job_metadata(total_bytes_to_transfer, total_resource_count)\n \n- exported_files = []\n-\n- with self.start_progress(\n- total=total_bytes_to_transfer\n- ) as overall_progress_update:\n- for f in files:\n-\n- if self.is_cancelled():\n- break\n-\n- dest = self.export_file(f, data_dir, overall_progress_update)\n- if dest:\n- exported_files.append(dest)\n+ # dont copy files if we are only exporting the manifest\n+ if not options[\"manifest_only\"]:\n+ self.copy_content_files(\n+ channel_id, data_dir, files, total_bytes_to_transfer\n+ )\n \n # Reraise any cancellation\n self.check_for_cancel()\n@@ -120,6 +115,18 @@\n )\n content_manifest.write(manifest_path)\n \n+ def copy_content_files(self, channel_id, data_dir, files, total_bytes_to_transfer):\n+ logger.info(\n+ \"Exporting content for channel id {} to {}\".format(channel_id, data_dir)\n+ )\n+ with self.start_progress(\n+ total=total_bytes_to_transfer\n+ ) as overall_progress_update:\n+ for f in files:\n+ if self.is_cancelled():\n+ break\n+ self.export_file(f, data_dir, overall_progress_update)\n+\n def export_file(self, f, data_dir, overall_progress_update):\n filename = get_content_file_name(f)\n try:\n", "issue": "Feature Request: Add --manifest-only option to exportcontent\nMy understanding is that 0.16 will generate a channel manifest during \r\n\r\n`kolibri manage exportcontent`\r\n\r\nMy request is that you add an option that will not do the export of content but only generate the manifest. 
This manifest could then be used on another remote install to import from network the same set of content.\r\n\r\n\r\n```[tasklist]\r\n### Tasks\r\n- [ ] Add --manifest-only command line option to the exportcontent management command\r\n- [ ] If this option is selected, generate the manifest, but skip copying any files (channel database files, and content files)\r\n- [ ] Write tests to confirm the --manifest-only behaviour\r\n```\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nfrom django.core.management.base import CommandError\n\nfrom ...utils import paths\nfrom kolibri.core.content.errors import InvalidStorageFilenameError\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.core.content.utils.content_manifest import ContentManifest\nfrom kolibri.core.content.utils.import_export_content import get_content_nodes_data\nfrom kolibri.core.content.utils.import_export_content import get_import_export_nodes\nfrom kolibri.core.content.utils.paths import get_content_file_name\nfrom kolibri.core.tasks.management.commands.base import AsyncCommand\nfrom kolibri.core.tasks.utils import get_current_job\nfrom kolibri.utils import file_transfer as transfer\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n exported_size = 0\n total_resources = 0\n\n def add_arguments(self, parser):\n node_ids_help_text = \"\"\"\n Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.\n Make sure to call this near the end of the argument list.\n\n e.g.\n\n kolibri manage importcontent network <channel id> --node_ids <id1>,<id2>, [<ids>,...]\n \"\"\"\n parser.add_argument(\n \"--node_ids\",\n \"-n\",\n # Split the comma separated string we get, into a list of strings\n type=lambda x: x.split(\",\") if x else [],\n default=None,\n required=False,\n dest=\"node_ids\",\n help=node_ids_help_text,\n )\n\n exclude_node_ids_help_text = \"\"\"\n Specify one or more node IDs to exclude. 
Files associated to those node IDs will be not be imported.\n Make sure to call this near the end of the argument list.\n\n e.g.\n\n kolibri manage importcontent network <channel id> --exclude_node_ids <id1>,<id2>, [<ids>,...]\n \"\"\"\n parser.add_argument(\n \"--exclude_node_ids\",\n type=lambda x: x.split(\",\") if x else [],\n default=None,\n required=False,\n dest=\"exclude_node_ids\",\n help=exclude_node_ids_help_text,\n )\n\n parser.add_argument(\"channel_id\", type=str)\n parser.add_argument(\"destination\", type=str)\n\n def update_job_metadata(self, total_bytes_to_transfer, total_resource_count):\n job = get_current_job()\n if job:\n job.extra_metadata[\"file_size\"] = total_bytes_to_transfer\n job.extra_metadata[\"total_resources\"] = total_resource_count\n job.save_meta()\n\n def handle_async(self, *args, **options):\n if paths.using_remote_storage():\n raise CommandError(\"Cannot export files when using remote file storage\")\n channel_id = options[\"channel_id\"]\n data_dir = os.path.realpath(options[\"destination\"])\n node_ids = options[\"node_ids\"]\n exclude_node_ids = options[\"exclude_node_ids\"]\n logger.info(\n \"Exporting content for channel id {} to {}\".format(channel_id, data_dir)\n )\n\n channel_metadata = ChannelMetadata.objects.get(id=channel_id)\n\n nodes_queries_list = get_import_export_nodes(\n channel_id, node_ids, exclude_node_ids, available=True\n )\n\n (total_resource_count, files, total_bytes_to_transfer) = get_content_nodes_data(\n channel_id, nodes_queries_list, available=True\n )\n\n self.update_job_metadata(total_bytes_to_transfer, total_resource_count)\n\n exported_files = []\n\n with self.start_progress(\n total=total_bytes_to_transfer\n ) as overall_progress_update:\n for f in files:\n\n if self.is_cancelled():\n break\n\n dest = self.export_file(f, data_dir, overall_progress_update)\n if dest:\n exported_files.append(dest)\n\n # Reraise any cancellation\n self.check_for_cancel()\n\n logger.info(\n \"Exporting manifest for channel id {} to {}\".format(channel_id, data_dir)\n )\n\n manifest_path = os.path.join(data_dir, \"content\", \"manifest.json\")\n content_manifest = ContentManifest()\n content_manifest.read(manifest_path)\n content_manifest.add_content_nodes(\n channel_id, channel_metadata.version, nodes_queries_list\n )\n content_manifest.write(manifest_path)\n\n def export_file(self, f, data_dir, overall_progress_update):\n filename = get_content_file_name(f)\n try:\n srcpath = paths.get_content_storage_file_path(filename)\n dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)\n except InvalidStorageFilenameError:\n # If any files have an invalid storage file name, don't export them.\n overall_progress_update(f[\"file_size\"])\n return\n\n # if the file already exists, add its size to our overall progress, and skip\n if os.path.isfile(dest) and os.path.getsize(dest) == f[\"file_size\"]:\n overall_progress_update(f[\"file_size\"])\n return\n copy = transfer.FileCopy(srcpath, dest, cancel_check=self.is_cancelled)\n with copy, self.start_progress(\n total=copy.transfer_size\n ) as file_cp_progress_update:\n\n def progress_update(length):\n self.exported_size = self.exported_size + length\n overall_progress_update(length)\n file_cp_progress_update(length)\n\n try:\n copy.run(progress_update=progress_update)\n except transfer.TransferCanceled:\n job = get_current_job()\n if job:\n job.extra_metadata[\"file_size\"] = self.exported_size\n job.extra_metadata[\"total_resources\"] = 0\n job.save_meta()\n return\n return dest\n", 
"path": "kolibri/core/content/management/commands/exportcontent.py"}], "after_files": [{"content": "import logging\nimport os\n\nfrom django.core.management.base import CommandError\n\nfrom ...utils import paths\nfrom kolibri.core.content.errors import InvalidStorageFilenameError\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.core.content.utils.content_manifest import ContentManifest\nfrom kolibri.core.content.utils.import_export_content import get_content_nodes_data\nfrom kolibri.core.content.utils.import_export_content import get_import_export_nodes\nfrom kolibri.core.content.utils.paths import get_content_file_name\nfrom kolibri.core.tasks.management.commands.base import AsyncCommand\nfrom kolibri.core.tasks.utils import get_current_job\nfrom kolibri.utils import file_transfer as transfer\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n exported_size = 0\n total_resources = 0\n\n def add_arguments(self, parser):\n node_ids_help_text = \"\"\"\n Specify one or more node IDs to import. Only the files associated to those node IDs will be imported.\n Make sure to call this near the end of the argument list.\n\n e.g.\n\n kolibri manage importcontent network <channel id> --node_ids <id1>,<id2>, [<ids>,...]\n \"\"\"\n parser.add_argument(\n \"--node_ids\",\n \"-n\",\n # Split the comma separated string we get, into a list of strings\n type=lambda x: x.split(\",\") if x else [],\n default=None,\n required=False,\n dest=\"node_ids\",\n help=node_ids_help_text,\n )\n\n exclude_node_ids_help_text = \"\"\"\n Specify one or more node IDs to exclude. Files associated to those node IDs will be not be imported.\n Make sure to call this near the end of the argument list.\n\n e.g.\n\n kolibri manage importcontent network <channel id> --exclude_node_ids <id1>,<id2>, [<ids>,...]\n \"\"\"\n parser.add_argument(\n \"--exclude_node_ids\",\n type=lambda x: x.split(\",\") if x else [],\n default=None,\n required=False,\n dest=\"exclude_node_ids\",\n help=exclude_node_ids_help_text,\n )\n\n parser.add_argument(\"channel_id\", type=str)\n parser.add_argument(\"destination\", type=str)\n parser.add_argument(\n \"--manifest-only\",\n action=\"store_true\",\n default=False,\n help=\"Generate only the manifest.json file\",\n )\n\n def update_job_metadata(self, total_bytes_to_transfer, total_resource_count):\n job = get_current_job()\n if job:\n job.extra_metadata[\"file_size\"] = total_bytes_to_transfer\n job.extra_metadata[\"total_resources\"] = total_resource_count\n job.save_meta()\n\n def handle_async(self, *args, **options):\n if paths.using_remote_storage():\n raise CommandError(\"Cannot export files when using remote file storage\")\n channel_id = options[\"channel_id\"]\n data_dir = os.path.realpath(options[\"destination\"])\n node_ids = options[\"node_ids\"]\n exclude_node_ids = options[\"exclude_node_ids\"]\n\n channel_metadata = ChannelMetadata.objects.get(id=channel_id)\n\n nodes_queries_list = get_import_export_nodes(\n channel_id, node_ids, exclude_node_ids, available=True\n )\n\n (total_resource_count, files, total_bytes_to_transfer) = get_content_nodes_data(\n channel_id, nodes_queries_list, available=True\n )\n\n self.update_job_metadata(total_bytes_to_transfer, total_resource_count)\n\n # dont copy files if we are only exporting the manifest\n if not options[\"manifest_only\"]:\n self.copy_content_files(\n channel_id, data_dir, files, total_bytes_to_transfer\n )\n\n # Reraise any cancellation\n self.check_for_cancel()\n\n logger.info(\n \"Exporting 
manifest for channel id {} to {}\".format(channel_id, data_dir)\n )\n\n manifest_path = os.path.join(data_dir, \"content\", \"manifest.json\")\n content_manifest = ContentManifest()\n content_manifest.read(manifest_path)\n content_manifest.add_content_nodes(\n channel_id, channel_metadata.version, nodes_queries_list\n )\n content_manifest.write(manifest_path)\n\n def copy_content_files(self, channel_id, data_dir, files, total_bytes_to_transfer):\n logger.info(\n \"Exporting content for channel id {} to {}\".format(channel_id, data_dir)\n )\n with self.start_progress(\n total=total_bytes_to_transfer\n ) as overall_progress_update:\n for f in files:\n if self.is_cancelled():\n break\n self.export_file(f, data_dir, overall_progress_update)\n\n def export_file(self, f, data_dir, overall_progress_update):\n filename = get_content_file_name(f)\n try:\n srcpath = paths.get_content_storage_file_path(filename)\n dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)\n except InvalidStorageFilenameError:\n # If any files have an invalid storage file name, don't export them.\n overall_progress_update(f[\"file_size\"])\n return\n\n # if the file already exists, add its size to our overall progress, and skip\n if os.path.isfile(dest) and os.path.getsize(dest) == f[\"file_size\"]:\n overall_progress_update(f[\"file_size\"])\n return\n copy = transfer.FileCopy(srcpath, dest, cancel_check=self.is_cancelled)\n with copy, self.start_progress(\n total=copy.transfer_size\n ) as file_cp_progress_update:\n\n def progress_update(length):\n self.exported_size = self.exported_size + length\n overall_progress_update(length)\n file_cp_progress_update(length)\n\n try:\n copy.run(progress_update=progress_update)\n except transfer.TransferCanceled:\n job = get_current_job()\n if job:\n job.extra_metadata[\"file_size\"] = self.exported_size\n job.extra_metadata[\"total_resources\"] = 0\n job.save_meta()\n return\n return dest\n", "path": "kolibri/core/content/management/commands/exportcontent.py"}]} | 1,951 | 574 |
gh_patches_debug_38180 | rasdani/github-patches | git_diff | streamlink__streamlink-5147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.nimotv: live stream stops after a couple of seconds
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
try any live stream on nimo.tv, and the live stream will stop in a couple of seconds.
### Debug log
```text
c:\temp>streamlink --loglevel debug https://www.nimo.tv/live/97747608 best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.7.8
[cli][debug] Streamlink: 5.1.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2018.11.29
[cli][debug] isodate: 0.6.0
[cli][debug] lxml: 4.6.4
[cli][debug] pycountry: 20.7.3
[cli][debug] pycryptodome: 3.7.3
[cli][debug] PySocks: 1.6.8
[cli][debug] requests: 2.26.0
[cli][debug] urllib3: 1.26.12
[cli][debug] websocket-client: 1.2.1
[cli][debug] importlib-metadata: 3.10.0
[cli][debug] Arguments:
[cli][debug] url=https://www.nimo.tv/live/97747608
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin nimotv for URL https://www.nimo.tv/live/97747608
[plugins.nimotv][debug] URL=http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8
[cli][info] Available streams: 240p (worst), 360p, 480p, 720p, 1080p (best)
[cli][info] Opening stream: 1080p (hls)
[cli][info] Starting player: "C:\Program Files\VideoLAN\VLC\vlc.exe"
[stream.hls][debug] Reloading playlist
[cli][debug] Pre-buffering 8192 bytes
[stream.hls][debug] First Sequence: 1674443953; Last Sequence: 1674443955
[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 1674443953; End Sequence: None
[stream.hls][debug] Adding segment 1674443953 to queue
[stream.hls][debug] Adding segment 1674443954 to queue
[stream.hls][debug] Adding segment 1674443955 to queue
[stream.hls][debug] Writing segment 1674443953 to output
[stream.hls][debug] Segment 1674443953 complete
[cli.output][debug] Opening subprocess: "C:\Program Files\VideoLAN\VLC\vlc.exe" --input-title-format https://www.nimo.tv/live/97747608 -
[cli][debug] Writing stream to output
[stream.hls][debug] Writing segment 1674443954 to output
[stream.hls][debug] Segment 1674443954 complete
[stream.hls][debug] Writing segment 1674443955 to output
[stream.hls][debug] Segment 1674443955 complete
[stream.hls][debug] Reloading playlist
[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)
[stream.hls][debug] Reloading playlist
[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)
[stream.hls][debug] Reloading playlist
[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/nimotv.py`
Content:
```
1 """
2 $description Chinese, global live-streaming platform run by Huya Live.
3 $url nimo.tv
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import useragents, validate
12 from streamlink.stream.hls import HLSStream
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r'https?://(?:www\.|m\.)?nimo\.tv/(?P<username>.*)'
19 ))
20 class NimoTV(Plugin):
21 data_url = 'https://m.nimo.tv/{0}'
22
23 video_qualities = {
24 250: '240p',
25 500: '360p',
26 1000: '480p',
27 2500: '720p',
28 6000: '1080p',
29 }
30
31 _re_appid = re.compile(br'appid=(\d+)')
32 _re_domain = re.compile(br'(https?:\/\/[A-Za-z]{2,3}.hls[A-Za-z\.\/]+)(?:V|&)')
33 _re_id = re.compile(br'id=([^|\\]+)')
34 _re_tp = re.compile(br'tp=(\d+)')
35
36 def _get_streams(self):
37 username = self.match.group('username')
38 if not username:
39 return
40
41 data = self.session.http.get(
42 self.data_url.format(username),
43 headers={
44 "User-Agent": useragents.ANDROID,
45 },
46 schema=validate.Schema(
47 re.compile(r"<script>var G_roomBaseInfo = ({.*?});</script>"),
48 validate.none_or_all(
49 validate.get(1),
50 validate.parse_json(),
51 {
52 "title": str,
53 "nickname": str,
54 "game": str,
55 "liveStreamStatus": int,
56 validate.optional("mStreamPkg"): str,
57 },
58 ),
59 ),
60 )
61
62 if data['liveStreamStatus'] == 0:
63 log.info('This stream is currently offline')
64 return
65
66 mStreamPkg = data.get('mStreamPkg')
67 if not mStreamPkg:
68 log.debug('missing mStreamPkg')
69 return
70
71 mStreamPkg = bytes.fromhex(mStreamPkg)
72 try:
73 _appid = self._re_appid.search(mStreamPkg).group(1).decode('utf-8')
74 _domain = self._re_domain.search(mStreamPkg).group(1).decode('utf-8')
75 _id = self._re_id.search(mStreamPkg).group(1).decode('utf-8')
76 _tp = self._re_tp.search(mStreamPkg).group(1).decode('utf-8')
77 except AttributeError:
78 log.error('invalid mStreamPkg')
79 return
80
81 params = {
82 'appid': _appid,
83 'id': _id,
84 'tp': _tp,
85 'u': '0',
86 't': '100',
87 'needwm': 1,
88 }
89 url = f'{_domain}{_id}.m3u8'
90 log.debug(f'URL={url}')
91 for k, v in self.video_qualities.items():
92 _params = params.copy()
93 _params["ratio"] = k
94 if v == '1080p':
95 _params["needwm"] = 0
96 elif v in ('720p', '480p', '360p'):
97 _params["sphd"] = 1
98
99 log.trace(f'{v} params={_params!r}')
100 # some qualities might not exist, but it will select a different lower quality
101 yield v, HLSStream(self.session, url, params=_params)
102
103 self.author = data['nickname']
104 self.category = data['game']
105 self.title = data['title']
106
107
108 __plugin__ = NimoTV
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/nimotv.py b/src/streamlink/plugins/nimotv.py
--- a/src/streamlink/plugins/nimotv.py
+++ b/src/streamlink/plugins/nimotv.py
@@ -9,7 +9,7 @@
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents, validate
-from streamlink.stream.hls import HLSStream
+from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
@@ -32,6 +32,8 @@
_re_domain = re.compile(br'(https?:\/\/[A-Za-z]{2,3}.hls[A-Za-z\.\/]+)(?:V|&)')
_re_id = re.compile(br'id=([^|\\]+)')
_re_tp = re.compile(br'tp=(\d+)')
+ _re_wsSecret = re.compile(br'wsSecret=(\w+)')
+ _re_wsTime = re.compile(br'wsTime=(\w+)')
def _get_streams(self):
username = self.match.group('username')
@@ -74,6 +76,8 @@
_domain = self._re_domain.search(mStreamPkg).group(1).decode('utf-8')
_id = self._re_id.search(mStreamPkg).group(1).decode('utf-8')
_tp = self._re_tp.search(mStreamPkg).group(1).decode('utf-8')
+ _wsSecret = self._re_wsSecret.search(mStreamPkg).group(1).decode('utf-8')
+ _wsTime = self._re_wsTime.search(mStreamPkg).group(1).decode('utf-8')
except AttributeError:
log.error('invalid mStreamPkg')
return
@@ -82,11 +86,14 @@
'appid': _appid,
'id': _id,
'tp': _tp,
+ 'wsSecret': _wsSecret,
+ 'wsTime': _wsTime,
'u': '0',
't': '100',
'needwm': 1,
}
- url = f'{_domain}{_id}.m3u8'
+ url = f'{_domain}{_id}.flv'
+ url = url.replace('hls.nimo.tv', 'flv.nimo.tv')
log.debug(f'URL={url}')
for k, v in self.video_qualities.items():
_params = params.copy()
@@ -98,7 +105,7 @@
log.trace(f'{v} params={_params!r}')
# some qualities might not exist, but it will select a different lower quality
- yield v, HLSStream(self.session, url, params=_params)
+ yield v, HTTPStream(self.session, url, params=_params)
self.author = data['nickname']
self.category = data['game']
| {"golden_diff": "diff --git a/src/streamlink/plugins/nimotv.py b/src/streamlink/plugins/nimotv.py\n--- a/src/streamlink/plugins/nimotv.py\n+++ b/src/streamlink/plugins/nimotv.py\n@@ -9,7 +9,7 @@\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import useragents, validate\n-from streamlink.stream.hls import HLSStream\n+from streamlink.stream.http import HTTPStream\n \n log = logging.getLogger(__name__)\n \n@@ -32,6 +32,8 @@\n _re_domain = re.compile(br'(https?:\\/\\/[A-Za-z]{2,3}.hls[A-Za-z\\.\\/]+)(?:V|&)')\n _re_id = re.compile(br'id=([^|\\\\]+)')\n _re_tp = re.compile(br'tp=(\\d+)')\n+ _re_wsSecret = re.compile(br'wsSecret=(\\w+)')\n+ _re_wsTime = re.compile(br'wsTime=(\\w+)')\n \n def _get_streams(self):\n username = self.match.group('username')\n@@ -74,6 +76,8 @@\n _domain = self._re_domain.search(mStreamPkg).group(1).decode('utf-8')\n _id = self._re_id.search(mStreamPkg).group(1).decode('utf-8')\n _tp = self._re_tp.search(mStreamPkg).group(1).decode('utf-8')\n+ _wsSecret = self._re_wsSecret.search(mStreamPkg).group(1).decode('utf-8')\n+ _wsTime = self._re_wsTime.search(mStreamPkg).group(1).decode('utf-8')\n except AttributeError:\n log.error('invalid mStreamPkg')\n return\n@@ -82,11 +86,14 @@\n 'appid': _appid,\n 'id': _id,\n 'tp': _tp,\n+ 'wsSecret': _wsSecret,\n+ 'wsTime': _wsTime,\n 'u': '0',\n 't': '100',\n 'needwm': 1,\n }\n- url = f'{_domain}{_id}.m3u8'\n+ url = f'{_domain}{_id}.flv'\n+ url = url.replace('hls.nimo.tv', 'flv.nimo.tv')\n log.debug(f'URL={url}')\n for k, v in self.video_qualities.items():\n _params = params.copy()\n@@ -98,7 +105,7 @@\n \n log.trace(f'{v} params={_params!r}')\n # some qualities might not exist, but it will select a different lower quality\n- yield v, HLSStream(self.session, url, params=_params)\n+ yield v, HTTPStream(self.session, url, params=_params)\n \n self.author = data['nickname']\n self.category = data['game']\n", "issue": "plugins.nimotv: live stream stops after couple of seconds\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\ntry any live streams of nimo.tv, the live stream will stop in couple of seconds.\n\n### Debug log\n\n```text\nc:\\temp>streamlink --loglevel debug https://www.nimo.tv/live/97747608 best\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.7.8\r\n[cli][debug] Streamlink: 5.1.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2018.11.29\r\n[cli][debug] isodate: 0.6.0\r\n[cli][debug] lxml: 4.6.4\r\n[cli][debug] pycountry: 20.7.3\r\n[cli][debug] pycryptodome: 3.7.3\r\n[cli][debug] PySocks: 1.6.8\r\n[cli][debug] requests: 2.26.0\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.2.1\r\n[cli][debug] importlib-metadata: 3.10.0\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.nimo.tv/live/97747608\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin nimotv for URL https://www.nimo.tv/live/97747608\r\n[plugins.nimotv][debug] 
URL=http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8\r\n[cli][info] Available streams: 240p (worst), 360p, 480p, 720p, 1080p (best)\r\n[cli][info] Opening stream: 1080p (hls)\r\n[cli][info] Starting player: \"C:\\Program Files\\VideoLAN\\VLC\\vlc.exe\"\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][debug] First Sequence: 1674443953; Last Sequence: 1674443955\r\n[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 1674443953; End Sequence: None\r\n[stream.hls][debug] Adding segment 1674443953 to queue\r\n[stream.hls][debug] Adding segment 1674443954 to queue\r\n[stream.hls][debug] Adding segment 1674443955 to queue\r\n[stream.hls][debug] Writing segment 1674443953 to output\r\n[stream.hls][debug] Segment 1674443953 complete\r\n[cli.output][debug] Opening subprocess: \"C:\\Program Files\\VideoLAN\\VLC\\vlc.exe\" --input-title-format https://www.nimo.tv/live/97747608 -\r\n[cli][debug] Writing stream to output\r\n[stream.hls][debug] Writing segment 1674443954 to output\r\n[stream.hls][debug] Segment 1674443954 complete\r\n[stream.hls][debug] Writing segment 1674443955 to output\r\n[stream.hls][debug] Segment 1674443955 complete\r\n[stream.hls][debug] Reloading playlist\r\n[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)\r\n[stream.hls][debug] Reloading playlist\r\n[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)\r\n[stream.hls][debug] Reloading playlist\r\n[stream.hls][warning] Failed to reload playlist: Unable to open URL: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000 (403 Client Error: Forbidden for url: http://tx.hls.nimo.tv/live/su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7.m3u8?appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000&appid=81&id=su1629530778249rea7ea30592d8ab7ce78a1a13e3037be7&tp=1674443951824&u=0&t=100&needwm=0&ratio=6000)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Chinese, global live-streaming platform run by Huya Live.\n$url nimo.tv\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = 
logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:www\\.|m\\.)?nimo\\.tv/(?P<username>.*)'\n))\nclass NimoTV(Plugin):\n data_url = 'https://m.nimo.tv/{0}'\n\n video_qualities = {\n 250: '240p',\n 500: '360p',\n 1000: '480p',\n 2500: '720p',\n 6000: '1080p',\n }\n\n _re_appid = re.compile(br'appid=(\\d+)')\n _re_domain = re.compile(br'(https?:\\/\\/[A-Za-z]{2,3}.hls[A-Za-z\\.\\/]+)(?:V|&)')\n _re_id = re.compile(br'id=([^|\\\\]+)')\n _re_tp = re.compile(br'tp=(\\d+)')\n\n def _get_streams(self):\n username = self.match.group('username')\n if not username:\n return\n\n data = self.session.http.get(\n self.data_url.format(username),\n headers={\n \"User-Agent\": useragents.ANDROID,\n },\n schema=validate.Schema(\n re.compile(r\"<script>var G_roomBaseInfo = ({.*?});</script>\"),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\n \"title\": str,\n \"nickname\": str,\n \"game\": str,\n \"liveStreamStatus\": int,\n validate.optional(\"mStreamPkg\"): str,\n },\n ),\n ),\n )\n\n if data['liveStreamStatus'] == 0:\n log.info('This stream is currently offline')\n return\n\n mStreamPkg = data.get('mStreamPkg')\n if not mStreamPkg:\n log.debug('missing mStreamPkg')\n return\n\n mStreamPkg = bytes.fromhex(mStreamPkg)\n try:\n _appid = self._re_appid.search(mStreamPkg).group(1).decode('utf-8')\n _domain = self._re_domain.search(mStreamPkg).group(1).decode('utf-8')\n _id = self._re_id.search(mStreamPkg).group(1).decode('utf-8')\n _tp = self._re_tp.search(mStreamPkg).group(1).decode('utf-8')\n except AttributeError:\n log.error('invalid mStreamPkg')\n return\n\n params = {\n 'appid': _appid,\n 'id': _id,\n 'tp': _tp,\n 'u': '0',\n 't': '100',\n 'needwm': 1,\n }\n url = f'{_domain}{_id}.m3u8'\n log.debug(f'URL={url}')\n for k, v in self.video_qualities.items():\n _params = params.copy()\n _params[\"ratio\"] = k\n if v == '1080p':\n _params[\"needwm\"] = 0\n elif v in ('720p', '480p', '360p'):\n _params[\"sphd\"] = 1\n\n log.trace(f'{v} params={_params!r}')\n # some qualities might not exist, but it will select a different lower quality\n yield v, HLSStream(self.session, url, params=_params)\n\n self.author = data['nickname']\n self.category = data['game']\n self.title = data['title']\n\n\n__plugin__ = NimoTV\n", "path": "src/streamlink/plugins/nimotv.py"}], "after_files": [{"content": "\"\"\"\n$description Chinese, global live-streaming platform run by Huya Live.\n$url nimo.tv\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r'https?://(?:www\\.|m\\.)?nimo\\.tv/(?P<username>.*)'\n))\nclass NimoTV(Plugin):\n data_url = 'https://m.nimo.tv/{0}'\n\n video_qualities = {\n 250: '240p',\n 500: '360p',\n 1000: '480p',\n 2500: '720p',\n 6000: '1080p',\n }\n\n _re_appid = re.compile(br'appid=(\\d+)')\n _re_domain = re.compile(br'(https?:\\/\\/[A-Za-z]{2,3}.hls[A-Za-z\\.\\/]+)(?:V|&)')\n _re_id = re.compile(br'id=([^|\\\\]+)')\n _re_tp = re.compile(br'tp=(\\d+)')\n _re_wsSecret = re.compile(br'wsSecret=(\\w+)')\n _re_wsTime = re.compile(br'wsTime=(\\w+)')\n\n def _get_streams(self):\n username = self.match.group('username')\n if not username:\n return\n\n data = self.session.http.get(\n self.data_url.format(username),\n headers={\n \"User-Agent\": useragents.ANDROID,\n },\n schema=validate.Schema(\n re.compile(r\"<script>var G_roomBaseInfo = 
({.*?});</script>\"),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\n \"title\": str,\n \"nickname\": str,\n \"game\": str,\n \"liveStreamStatus\": int,\n validate.optional(\"mStreamPkg\"): str,\n },\n ),\n ),\n )\n\n if data['liveStreamStatus'] == 0:\n log.info('This stream is currently offline')\n return\n\n mStreamPkg = data.get('mStreamPkg')\n if not mStreamPkg:\n log.debug('missing mStreamPkg')\n return\n\n mStreamPkg = bytes.fromhex(mStreamPkg)\n try:\n _appid = self._re_appid.search(mStreamPkg).group(1).decode('utf-8')\n _domain = self._re_domain.search(mStreamPkg).group(1).decode('utf-8')\n _id = self._re_id.search(mStreamPkg).group(1).decode('utf-8')\n _tp = self._re_tp.search(mStreamPkg).group(1).decode('utf-8')\n _wsSecret = self._re_wsSecret.search(mStreamPkg).group(1).decode('utf-8')\n _wsTime = self._re_wsTime.search(mStreamPkg).group(1).decode('utf-8')\n except AttributeError:\n log.error('invalid mStreamPkg')\n return\n\n params = {\n 'appid': _appid,\n 'id': _id,\n 'tp': _tp,\n 'wsSecret': _wsSecret,\n 'wsTime': _wsTime,\n 'u': '0',\n 't': '100',\n 'needwm': 1,\n }\n url = f'{_domain}{_id}.flv'\n url = url.replace('hls.nimo.tv', 'flv.nimo.tv')\n log.debug(f'URL={url}')\n for k, v in self.video_qualities.items():\n _params = params.copy()\n _params[\"ratio\"] = k\n if v == '1080p':\n _params[\"needwm\"] = 0\n elif v in ('720p', '480p', '360p'):\n _params[\"sphd\"] = 1\n\n log.trace(f'{v} params={_params!r}')\n # some qualities might not exist, but it will select a different lower quality\n yield v, HTTPStream(self.session, url, params=_params)\n\n self.author = data['nickname']\n self.category = data['game']\n self.title = data['title']\n\n\n__plugin__ = NimoTV\n", "path": "src/streamlink/plugins/nimotv.py"}]} | 3,596 | 648 |
gh_patches_debug_227 | rasdani/github-patches | git_diff | sktime__sktime-3618 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] ShapeletTransformClassifier numba error when dtype is not float64
**Describe the bug**
It seems that when using `ShapeletTransformClassifier` there are some Numba-accelerated functions that break if the data in the input data frame are of type `int32`.
**To Reproduce**
MRE as below:
```python
import warnings
warnings.simplefilter('ignore', category=FutureWarning)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sktime.classification.shapelet_based import ShapeletTransformClassifier
from sktime.contrib.vector_classifiers._rotation_forest import RotationForest
# make fake data
data = pd.DataFrame(np.random.random((5000, 250))).astype(np.float32)
# reshape to input into Shapelet Classifier
data4train = data.apply(lambda row: pd.Series({
'time-series': pd.Series(row.values)
}), axis=1)
# make targets
targets = pd.Series(2500 * [1] + 2500 * [0])
# train test split
X_train, X_test, y_train, y_test = train_test_split(
data4train, targets, test_size=0.7, random_state=42
)
# train
clf = ShapeletTransformClassifier(
estimator=RotationForest(n_estimators=3),
n_shapelet_samples=500,
max_shapelets=20,
batch_size=100,
)
clf.fit(X_train, y_train)
```
**Expected behavior**
will not throw an error, and also enforce conversion to float32 or float64 within the classifier?
**Additional context**
removing the conversion to `float32` (hence `dtype == float64`) will make the code run without issues.
**Versions**
numba 0.55.1
sklearn 0.24.1
sktime 0.11.0
pandas 1.4.2
python 3.8.10
**Stacktrace output**
```bash
TypingError: Failed in nopython mode pipeline (step: nopython frontend)
Cannot unify array(float64, 1d, C) and array(float32, 1d, C) for 'X_n.2', defined at /path_to_mypython/python/lib/python3.8/site-packages/sktime/utils/numba/general.py (39)
File "../python/lib/python3.8/site-packages/sktime/utils/numba/general.py", line 39:
def z_normalise_series(X):
<source elided>
return X_n
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/utils/numba/general.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """General numba utilities."""
3
4 import numpy as np
5 from numba import njit
6
7
8 @njit(fastmath=True, cache=True)
9 def unique_count(X):
10 """Numba unique count function for a 1D array."""
11 if len(X) > 0:
12 X = np.sort(X)
13 unique = np.zeros(len(X))
14 unique[0] = X[0]
15 counts = np.zeros(len(X), dtype=np.int_)
16 counts[0] = 1
17 unique_count = 0
18
19 for i in X[1:]:
20 if i != unique[unique_count]:
21 unique_count += 1
22 unique[unique_count] = i
23 counts[unique_count] = 1
24 else:
25 counts[unique_count] += 1
26 return unique[: unique_count + 1], counts[: unique_count + 1]
27 return None, np.zeros(0, dtype=np.int_)
28
29
30 @njit(fastmath=True, cache=True)
31 def z_normalise_series(X):
32 """Numba z-normalisation function for a single time series."""
33 std = np.std(X)
34 if std > 0:
35 X_n = (X - np.mean(X)) / std
36 else:
37 X_n = np.zeros(len(X))
38
39 return X_n
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/utils/numba/general.py b/sktime/utils/numba/general.py
--- a/sktime/utils/numba/general.py
+++ b/sktime/utils/numba/general.py
@@ -34,6 +34,5 @@
if std > 0:
X_n = (X - np.mean(X)) / std
else:
- X_n = np.zeros(len(X))
-
+ X_n = X - np.mean(X)
return X_n
| {"golden_diff": "diff --git a/sktime/utils/numba/general.py b/sktime/utils/numba/general.py\n--- a/sktime/utils/numba/general.py\n+++ b/sktime/utils/numba/general.py\n@@ -34,6 +34,5 @@\n if std > 0:\n X_n = (X - np.mean(X)) / std\n else:\n- X_n = np.zeros(len(X))\n-\n+ X_n = X - np.mean(X)\n return X_n\n", "issue": "[BUG] ShapeletTransformClassifier numba error when dtype is not float64\n**Describe the bug**\r\nSeems that when using `ShapeletTransformClassifier` there is some Numba accelerated functions that break if the data in the input data frame are of type `int32`.\r\n\r\n**To Reproduce**\r\nMRE as below:\r\n\r\n```python\r\nimport warnings\r\nwarnings.simplefilter('ignore', category=FutureWarning)\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom sktime.classification.shapelet_based import ShapeletTransformClassifier\r\nfrom sktime.contrib.vector_classifiers._rotation_forest import RotationForest\r\n\r\n# make fake data\r\ndata = pd.DataFrame(np.random.random((5000, 250))).astype(np.float32)\r\n\r\n# reshape to input into Shapelet Classifier\r\ndata4train = data.apply(lambda row: pd.Series({\r\n 'time-series': pd.Series(row.values)\r\n}), axis=1)\r\n\r\n# make targets\r\ntargets = pd.Series(2500 * [1] + 2500 * [0])\r\n\r\n# train test split\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n data4train, targets, test_size=0.7, random_state=42\r\n)\r\n\r\n# train\r\nclf = ShapeletTransformClassifier(\r\n estimator=RotationForest(n_estimators=3),\r\n n_shapelet_samples=500,\r\n max_shapelets=20,\r\n batch_size=100,\r\n)\r\n\r\nclf.fit(X_train, y_train)\r\n```\r\n\r\n**Expected behavior**\r\nwill not throw an error, and also enforce conversion to float32 or float64 within the classifier?\r\n**Additional context**\r\nremoving conversion to `float32` (hence `dtype == float64`) will make the code running without issues.\r\n\r\n**Versions**\r\nnumba 0.55.1\r\nsklearn 0.24.1\r\nsktime 0.11.0\r\npandas 1.4.2\r\npython 3.8.10\r\n\r\n**Stacktrace output**\r\n```bash\r\nTypingError: Failed in nopython mode pipeline (step: nopython frontend)\r\nCannot unify array(float64, 1d, C) and array(float32, 1d, C) for 'X_n.2', defined at /path_to_mypython/python/lib/python3.8/site-packages/sktime/utils/numba/general.py (39)\r\n\r\nFile \"../python/lib/python3.8/site-packages/sktime/utils/numba/general.py\", line 39:\r\ndef z_normalise_series(X):\r\n <source elided>\r\n\r\n return X_n\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"General numba utilities.\"\"\"\n\nimport numpy as np\nfrom numba import njit\n\n\n@njit(fastmath=True, cache=True)\ndef unique_count(X):\n \"\"\"Numba unique count function for a 1D array.\"\"\"\n if len(X) > 0:\n X = np.sort(X)\n unique = np.zeros(len(X))\n unique[0] = X[0]\n counts = np.zeros(len(X), dtype=np.int_)\n counts[0] = 1\n unique_count = 0\n\n for i in X[1:]:\n if i != unique[unique_count]:\n unique_count += 1\n unique[unique_count] = i\n counts[unique_count] = 1\n else:\n counts[unique_count] += 1\n return unique[: unique_count + 1], counts[: unique_count + 1]\n return None, np.zeros(0, dtype=np.int_)\n\n\n@njit(fastmath=True, cache=True)\ndef z_normalise_series(X):\n \"\"\"Numba z-normalisation function for a single time series.\"\"\"\n std = np.std(X)\n if std > 0:\n X_n = (X - np.mean(X)) / std\n else:\n X_n = np.zeros(len(X))\n\n return X_n\n", "path": "sktime/utils/numba/general.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"General numba 
utilities.\"\"\"\n\nimport numpy as np\nfrom numba import njit\n\n\n@njit(fastmath=True, cache=True)\ndef unique_count(X):\n \"\"\"Numba unique count function for a 1D array.\"\"\"\n if len(X) > 0:\n X = np.sort(X)\n unique = np.zeros(len(X))\n unique[0] = X[0]\n counts = np.zeros(len(X), dtype=np.int_)\n counts[0] = 1\n unique_count = 0\n\n for i in X[1:]:\n if i != unique[unique_count]:\n unique_count += 1\n unique[unique_count] = i\n counts[unique_count] = 1\n else:\n counts[unique_count] += 1\n return unique[: unique_count + 1], counts[: unique_count + 1]\n return None, np.zeros(0, dtype=np.int_)\n\n\n@njit(fastmath=True, cache=True)\ndef z_normalise_series(X):\n \"\"\"Numba z-normalisation function for a single time series.\"\"\"\n std = np.std(X)\n if std > 0:\n X_n = (X - np.mean(X)) / std\n else:\n X_n = X - np.mean(X)\n return X_n\n", "path": "sktime/utils/numba/general.py"}]} | 1,189 | 111 |
gh_patches_debug_20253 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`mathesar_temp_schema` should be hidden
## Description
<!-- A clear and concise description of what the bug is. -->
Currently, the system schema `mathesar_temp_schema` is returned as a standard schema, and ends up displayed as a result in the UI. This is confusing, since that schema is used for system operations, and shouldn't be available to the user.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
The schema `mathesar_temp_schema` should be hidden.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
After starting the service and doing type inference on at least one CSV loaded into a table, go to `http://localhost:8000/api/v0/schemas/`. Note that `mathesar_temp_schema` will be one of the schemata in the `mathesar_tables` DB.
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
We're already hiding some schemata, e.g., `mathesar_types`. The implementer should figure out where the list of such schemata is, and add `mathesar_temp_schema` to that list.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/tables/operations/infer_types.py`
Content:
```
1 from time import time
2
3 from sqlalchemy import select
4
5 from db import constants
6 from db.columns.base import MathesarColumn
7 from db.columns.operations.infer_types import infer_column_type
8 from db.schemas.operations.create import create_schema
9 from db.tables.operations.create import CreateTableAs
10 from db.tables.operations.select import reflect_table
11
12
13 TEMP_SCHEMA = f"{constants.MATHESAR_PREFIX}temp_schema"
14 TEMP_TABLE = f"{constants.MATHESAR_PREFIX}temp_table_%s"
15
16
17 def update_table_column_types(schema, table_name, engine):
18 table = reflect_table(table_name, schema, engine)
19 # we only want to infer (modify) the type of non-default columns
20 inferable_column_names = (
21 col.name for col in table.columns
22 if not MathesarColumn.from_column(col).is_default
23 and not col.primary_key
24 and not col.foreign_keys
25 )
26 for column_name in inferable_column_names:
27 infer_column_type(
28 schema,
29 table_name,
30 column_name,
31 engine,
32 )
33
34
35 def infer_table_column_types(schema, table_name, engine):
36 table = reflect_table(table_name, schema, engine)
37
38 temp_name = TEMP_TABLE % (int(time()))
39 create_schema(TEMP_SCHEMA, engine)
40 with engine.begin() as conn:
41 while engine.dialect.has_table(conn, temp_name, schema=TEMP_SCHEMA):
42 temp_name = TEMP_TABLE.format(int(time()))
43
44 full_temp_name = f"{TEMP_SCHEMA}.{temp_name}"
45
46 select_table = select(table)
47 with engine.begin() as conn:
48 conn.execute(CreateTableAs(full_temp_name, select_table))
49 temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)
50
51 try:
52 update_table_column_types(
53 TEMP_SCHEMA, temp_table.name, engine,
54 )
55 except Exception as e:
56 # Ensure the temp table is deleted
57 temp_table.drop()
58 raise e
59 else:
60 temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)
61 types = [c.type.__class__ for c in temp_table.columns]
62 temp_table.drop()
63 return types
64
```
Path: `db/schemas/operations/select.py`
Content:
```
1 import warnings
2
3 from sqlalchemy import MetaData, select, and_, not_, or_, Table
4
5 from db import types
6
7
8 TYPES_SCHEMA = types.base.SCHEMA
9 EXCLUDED_SCHEMATA = [TYPES_SCHEMA, "information_schema"]
10
11
12 def reflect_schema(engine, name=None, oid=None):
13 # If we have both arguments, the behavior is undefined.
14 try:
15 assert name is None or oid is None
16 except AssertionError as e:
17 raise e
18 metadata = MetaData()
19 with warnings.catch_warnings():
20 warnings.filterwarnings("ignore", message="Did not recognize type")
21 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
22 sel = (
23 select(pg_namespace.c.oid, pg_namespace.c.nspname.label("name"))
24 .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))
25 )
26 with engine.begin() as conn:
27 schema_info = conn.execute(sel).fetchone()
28 return schema_info
29
30
31 def get_mathesar_schemas_with_oids(engine):
32 metadata = MetaData()
33 with warnings.catch_warnings():
34 warnings.filterwarnings("ignore", message="Did not recognize type")
35 pg_namespace = Table("pg_namespace", metadata, autoload_with=engine)
36 sel = (
37 select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)
38 .where(
39 and_(
40 *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],
41 not_(pg_namespace.c.nspname.like("pg_%"))
42 )
43 )
44 )
45 with engine.begin() as conn:
46 result = conn.execute(sel).fetchall()
47 return result
48
```
Path: `db/constants.py`
Content:
```
1 MATHESAR_PREFIX = "mathesar_"
2 ID = "id"
3 ID_ORIGINAL = "id_original"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/constants.py b/db/constants.py
--- a/db/constants.py
+++ b/db/constants.py
@@ -1,3 +1,4 @@
MATHESAR_PREFIX = "mathesar_"
ID = "id"
ID_ORIGINAL = "id_original"
+INFERENCE_SCHEMA = f"{MATHESAR_PREFIX}inference_schema"
diff --git a/db/schemas/operations/select.py b/db/schemas/operations/select.py
--- a/db/schemas/operations/select.py
+++ b/db/schemas/operations/select.py
@@ -2,11 +2,12 @@
from sqlalchemy import MetaData, select, and_, not_, or_, Table
+from db import constants
from db import types
-
TYPES_SCHEMA = types.base.SCHEMA
-EXCLUDED_SCHEMATA = [TYPES_SCHEMA, "information_schema"]
+TEMP_INFER_SCHEMA = constants.INFERENCE_SCHEMA
+EXCLUDED_SCHEMATA = [TYPES_SCHEMA, TEMP_INFER_SCHEMA, "information_schema"]
def reflect_schema(engine, name=None, oid=None):
diff --git a/db/tables/operations/infer_types.py b/db/tables/operations/infer_types.py
--- a/db/tables/operations/infer_types.py
+++ b/db/tables/operations/infer_types.py
@@ -10,7 +10,7 @@
from db.tables.operations.select import reflect_table
-TEMP_SCHEMA = f"{constants.MATHESAR_PREFIX}temp_schema"
+TEMP_SCHEMA = constants.INFERENCE_SCHEMA
TEMP_TABLE = f"{constants.MATHESAR_PREFIX}temp_table_%s"
| {"golden_diff": "diff --git a/db/constants.py b/db/constants.py\n--- a/db/constants.py\n+++ b/db/constants.py\n@@ -1,3 +1,4 @@\n MATHESAR_PREFIX = \"mathesar_\"\n ID = \"id\"\n ID_ORIGINAL = \"id_original\"\n+INFERENCE_SCHEMA = f\"{MATHESAR_PREFIX}inference_schema\"\ndiff --git a/db/schemas/operations/select.py b/db/schemas/operations/select.py\n--- a/db/schemas/operations/select.py\n+++ b/db/schemas/operations/select.py\n@@ -2,11 +2,12 @@\n \n from sqlalchemy import MetaData, select, and_, not_, or_, Table\n \n+from db import constants\n from db import types\n \n-\n TYPES_SCHEMA = types.base.SCHEMA\n-EXCLUDED_SCHEMATA = [TYPES_SCHEMA, \"information_schema\"]\n+TEMP_INFER_SCHEMA = constants.INFERENCE_SCHEMA\n+EXCLUDED_SCHEMATA = [TYPES_SCHEMA, TEMP_INFER_SCHEMA, \"information_schema\"]\n \n \n def reflect_schema(engine, name=None, oid=None):\ndiff --git a/db/tables/operations/infer_types.py b/db/tables/operations/infer_types.py\n--- a/db/tables/operations/infer_types.py\n+++ b/db/tables/operations/infer_types.py\n@@ -10,7 +10,7 @@\n from db.tables.operations.select import reflect_table\n \n \n-TEMP_SCHEMA = f\"{constants.MATHESAR_PREFIX}temp_schema\"\n+TEMP_SCHEMA = constants.INFERENCE_SCHEMA\n TEMP_TABLE = f\"{constants.MATHESAR_PREFIX}temp_table_%s\"\n", "issue": "`mathesar_temp_schema` should be hidden\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nCurrently, the system schema `mathesar_temp_schema` is returned as a standard schema, and ends up displayed as a result in the UI. This is confusing, since that schema is used for system operations, and shouldn't be available to the user.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nThe schema `mathesar_temp_schema` should be hidden.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\nAfter starting the service and doing type inference on at least one CSV loading into a table, go to `http://localhost:8000/api/v0/schemas/`. Note that `mathesar_temp_schema` will be one of the schemata in the `mathesar_tables` DB.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. -->\r\n\r\nWe're already hiding some schemata, e.g., `mathesar_types`. 
The implementer should figure out where the list of such schemata is, and add `mathesar_temp_schema` to that list.\n", "before_files": [{"content": "from time import time\n\nfrom sqlalchemy import select\n\nfrom db import constants\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.infer_types import infer_column_type\nfrom db.schemas.operations.create import create_schema\nfrom db.tables.operations.create import CreateTableAs\nfrom db.tables.operations.select import reflect_table\n\n\nTEMP_SCHEMA = f\"{constants.MATHESAR_PREFIX}temp_schema\"\nTEMP_TABLE = f\"{constants.MATHESAR_PREFIX}temp_table_%s\"\n\n\ndef update_table_column_types(schema, table_name, engine):\n table = reflect_table(table_name, schema, engine)\n # we only want to infer (modify) the type of non-default columns\n inferable_column_names = (\n col.name for col in table.columns\n if not MathesarColumn.from_column(col).is_default\n and not col.primary_key\n and not col.foreign_keys\n )\n for column_name in inferable_column_names:\n infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n )\n\n\ndef infer_table_column_types(schema, table_name, engine):\n table = reflect_table(table_name, schema, engine)\n\n temp_name = TEMP_TABLE % (int(time()))\n create_schema(TEMP_SCHEMA, engine)\n with engine.begin() as conn:\n while engine.dialect.has_table(conn, temp_name, schema=TEMP_SCHEMA):\n temp_name = TEMP_TABLE.format(int(time()))\n\n full_temp_name = f\"{TEMP_SCHEMA}.{temp_name}\"\n\n select_table = select(table)\n with engine.begin() as conn:\n conn.execute(CreateTableAs(full_temp_name, select_table))\n temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)\n\n try:\n update_table_column_types(\n TEMP_SCHEMA, temp_table.name, engine,\n )\n except Exception as e:\n # Ensure the temp table is deleted\n temp_table.drop()\n raise e\n else:\n temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)\n types = [c.type.__class__ for c in temp_table.columns]\n temp_table.drop()\n return types\n", "path": "db/tables/operations/infer_types.py"}, {"content": "import warnings\n\nfrom sqlalchemy import MetaData, select, and_, not_, or_, Table\n\nfrom db import types\n\n\nTYPES_SCHEMA = types.base.SCHEMA\nEXCLUDED_SCHEMATA = [TYPES_SCHEMA, \"information_schema\"]\n\n\ndef reflect_schema(engine, name=None, oid=None):\n # If we have both arguments, the behavior is undefined.\n try:\n assert name is None or oid is None\n except AssertionError as e:\n raise e\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.oid, pg_namespace.c.nspname.label(\"name\"))\n .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))\n )\n with engine.begin() as conn:\n schema_info = conn.execute(sel).fetchone()\n return schema_info\n\n\ndef get_mathesar_schemas_with_oids(engine):\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)\n .where(\n and_(\n *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],\n not_(pg_namespace.c.nspname.like(\"pg_%\"))\n )\n )\n )\n with engine.begin() as conn:\n result = conn.execute(sel).fetchall()\n return result\n", "path": 
"db/schemas/operations/select.py"}, {"content": "MATHESAR_PREFIX = \"mathesar_\"\nID = \"id\"\nID_ORIGINAL = \"id_original\"\n", "path": "db/constants.py"}], "after_files": [{"content": "from time import time\n\nfrom sqlalchemy import select\n\nfrom db import constants\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.infer_types import infer_column_type\nfrom db.schemas.operations.create import create_schema\nfrom db.tables.operations.create import CreateTableAs\nfrom db.tables.operations.select import reflect_table\n\n\nTEMP_SCHEMA = constants.INFERENCE_SCHEMA\nTEMP_TABLE = f\"{constants.MATHESAR_PREFIX}temp_table_%s\"\n\n\ndef update_table_column_types(schema, table_name, engine):\n table = reflect_table(table_name, schema, engine)\n # we only want to infer (modify) the type of non-default columns\n inferable_column_names = (\n col.name for col in table.columns\n if not MathesarColumn.from_column(col).is_default\n and not col.primary_key\n and not col.foreign_keys\n )\n for column_name in inferable_column_names:\n infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n )\n\n\ndef infer_table_column_types(schema, table_name, engine):\n table = reflect_table(table_name, schema, engine)\n\n temp_name = TEMP_TABLE % (int(time()))\n create_schema(TEMP_SCHEMA, engine)\n with engine.begin() as conn:\n while engine.dialect.has_table(conn, temp_name, schema=TEMP_SCHEMA):\n temp_name = TEMP_TABLE.format(int(time()))\n\n full_temp_name = f\"{TEMP_SCHEMA}.{temp_name}\"\n\n select_table = select(table)\n with engine.begin() as conn:\n conn.execute(CreateTableAs(full_temp_name, select_table))\n temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)\n\n try:\n update_table_column_types(\n TEMP_SCHEMA, temp_table.name, engine,\n )\n except Exception as e:\n # Ensure the temp table is deleted\n temp_table.drop()\n raise e\n else:\n temp_table = reflect_table(temp_name, TEMP_SCHEMA, engine)\n types = [c.type.__class__ for c in temp_table.columns]\n temp_table.drop()\n return types\n", "path": "db/tables/operations/infer_types.py"}, {"content": "import warnings\n\nfrom sqlalchemy import MetaData, select, and_, not_, or_, Table\n\nfrom db import constants\nfrom db import types\n\nTYPES_SCHEMA = types.base.SCHEMA\nTEMP_INFER_SCHEMA = constants.INFERENCE_SCHEMA\nEXCLUDED_SCHEMATA = [TYPES_SCHEMA, TEMP_INFER_SCHEMA, \"information_schema\"]\n\n\ndef reflect_schema(engine, name=None, oid=None):\n # If we have both arguments, the behavior is undefined.\n try:\n assert name is None or oid is None\n except AssertionError as e:\n raise e\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.oid, pg_namespace.c.nspname.label(\"name\"))\n .where(or_(pg_namespace.c.nspname == name, pg_namespace.c.oid == oid))\n )\n with engine.begin() as conn:\n schema_info = conn.execute(sel).fetchone()\n return schema_info\n\n\ndef get_mathesar_schemas_with_oids(engine):\n metadata = MetaData()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", message=\"Did not recognize type\")\n pg_namespace = Table(\"pg_namespace\", metadata, autoload_with=engine)\n sel = (\n select(pg_namespace.c.nspname.label('schema'), pg_namespace.c.oid)\n .where(\n and_(\n *[pg_namespace.c.nspname != schema for schema in EXCLUDED_SCHEMATA],\n not_(pg_namespace.c.nspname.like(\"pg_%\"))\n )\n )\n )\n with 
engine.begin() as conn:\n result = conn.execute(sel).fetchall()\n return result\n", "path": "db/schemas/operations/select.py"}, {"content": "MATHESAR_PREFIX = \"mathesar_\"\nID = \"id\"\nID_ORIGINAL = \"id_original\"\nINFERENCE_SCHEMA = f\"{MATHESAR_PREFIX}inference_schema\"\n", "path": "db/constants.py"}]} | 1,606 | 338 |
gh_patches_debug_2157 | rasdani/github-patches | git_diff | scipy__scipy-10353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: interpolate.NearestNDInterpolator with pandas
interpolate.NearestNDInterpolator does not work as expected when used with a sliced pandas DataFrame.
This is due to the index being maintained when making selections in pandas.
### Reproducing code example:
```
import numpy as np
import pandas as pd
from scipy import interpolate
df = pd.DataFrame(np.array([[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 2]]).T, columns=['x', 'y', 'z'])
df_select = df[3:]
NI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), df_select.z)
print(NI([0.1, 0.9], [0.1, 0.9]))
```
I expect [0, 2] to be the output.
But the output is [NaN, 0], returned as a pandas.Series.
This is due to the index being maintained when making selections in pandas.
Specifically, `df_select.z` keeps the index [3, 4, 5, 6].
However, `self.tree.query(xi)` (line 81 in scipy/interpolate/ndgriddata.py) returns positional indices that assume the data starts at zero, so it returns [0, 3].
Therefore `self.values[i]` (line 82 in scipy/interpolate/ndgriddata.py) performs a label-based lookup with invalid labels, as the small demo below shows.
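A minimal demonstration of the mismatch (behaviour is pandas-version dependent: older releases return NaN for missing labels, newer ones raise a KeyError; the [NaN, 0] result above matches the older behaviour):

```python
import numpy as np
import pandas as pd

# df_select.z keeps the original labels 3..6 after slicing df[3:]
z = pd.Series([0, 1, 1, 2], index=[3, 4, 5, 6])
i = np.array([0, 3])  # positional indices as returned by cKDTree.query

print(z[i])              # label-based lookup: label 0 missing, label 3 -> 0  => [NaN, 0]
print(np.asarray(z)[i])  # positional lookup on the underlying array          => [0, 2]
```

Converting the values to a plain ndarray (as in the `np.array(df_select.z)` workaround noted below) sidesteps the label-based lookup entirely.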
### Note
if case of
```
df_select = df[3:].reset_index()
```
or
```
NI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), np.array(df_select.z))
```
it works as expected.
Also, this bug does not occur in interpolate.LinearNDInterpolator.
### Scipy/Numpy/Python version information:
```
1.3.0 1.16.4 sys.version_info(major=3, minor=6, micro=8, releaselevel='final', serial=0)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/interpolate/ndgriddata.py`
Content:
```
1 """
2 Convenience interface to N-D interpolation
3
4 .. versionadded:: 0.9
5
6 """
7 from __future__ import division, print_function, absolute_import
8
9 import numpy as np
10 from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
11 CloughTocher2DInterpolator, _ndim_coords_from_arrays
12 from scipy.spatial import cKDTree
13
14 __all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
15 'CloughTocher2DInterpolator']
16
17 #------------------------------------------------------------------------------
18 # Nearest-neighbour interpolation
19 #------------------------------------------------------------------------------
20
21
22 class NearestNDInterpolator(NDInterpolatorBase):
23 """
24 NearestNDInterpolator(x, y)
25
26 Nearest-neighbour interpolation in N dimensions.
27
28 .. versionadded:: 0.9
29
30 Methods
31 -------
32 __call__
33
34 Parameters
35 ----------
36 x : (Npoints, Ndims) ndarray of floats
37 Data point coordinates.
38 y : (Npoints,) ndarray of float or complex
39 Data values.
40 rescale : boolean, optional
41 Rescale points to unit cube before performing interpolation.
42 This is useful if some of the input dimensions have
43 incommensurable units and differ by many orders of magnitude.
44
45 .. versionadded:: 0.14.0
46 tree_options : dict, optional
47 Options passed to the underlying ``cKDTree``.
48
49 .. versionadded:: 0.17.0
50
51
52 Notes
53 -----
54 Uses ``scipy.spatial.cKDTree``
55
56 """
57
58 def __init__(self, x, y, rescale=False, tree_options=None):
59 NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
60 need_contiguous=False,
61 need_values=False)
62 if tree_options is None:
63 tree_options = dict()
64 self.tree = cKDTree(self.points, **tree_options)
65 self.values = y
66
67 def __call__(self, *args):
68 """
69 Evaluate interpolator at given points.
70
71 Parameters
72 ----------
73 xi : ndarray of float, shape (..., ndim)
74 Points where to interpolate data at.
75
76 """
77 xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
78 xi = self._check_call_shape(xi)
79 xi = self._scale_x(xi)
80 dist, i = self.tree.query(xi)
81 return self.values[i]
82
83
84 #------------------------------------------------------------------------------
85 # Convenience interface function
86 #------------------------------------------------------------------------------
87
88 def griddata(points, values, xi, method='linear', fill_value=np.nan,
89 rescale=False):
90 """
91 Interpolate unstructured D-dimensional data.
92
93 Parameters
94 ----------
95 points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).
96 Data point coordinates.
97 values : ndarray of float or complex, shape (n,)
98 Data values.
99 xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.
100 Points at which to interpolate data.
101 method : {'linear', 'nearest', 'cubic'}, optional
102 Method of interpolation. One of
103
104 ``nearest``
105 return the value at the data point closest to
106 the point of interpolation. See `NearestNDInterpolator` for
107 more details.
108
109 ``linear``
110 tessellate the input point set to n-dimensional
111 simplices, and interpolate linearly on each simplex. See
112 `LinearNDInterpolator` for more details.
113
114 ``cubic`` (1-D)
115 return the value determined from a cubic
116 spline.
117
118 ``cubic`` (2-D)
119 return the value determined from a
120 piecewise cubic, continuously differentiable (C1), and
121 approximately curvature-minimizing polynomial surface. See
122 `CloughTocher2DInterpolator` for more details.
123 fill_value : float, optional
124 Value used to fill in for requested points outside of the
125 convex hull of the input points. If not provided, then the
126 default is ``nan``. This option has no effect for the
127 'nearest' method.
128 rescale : bool, optional
129 Rescale points to unit cube before performing interpolation.
130 This is useful if some of the input dimensions have
131 incommensurable units and differ by many orders of magnitude.
132
133 .. versionadded:: 0.14.0
134
135 Returns
136 -------
137 ndarray
138 Array of interpolated values.
139
140 Notes
141 -----
142
143 .. versionadded:: 0.9
144
145 Examples
146 --------
147
148 Suppose we want to interpolate the 2-D function
149
150 >>> def func(x, y):
151 ... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
152
153 on a grid in [0, 1]x[0, 1]
154
155 >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
156
157 but we only know its values at 1000 data points:
158
159 >>> points = np.random.rand(1000, 2)
160 >>> values = func(points[:,0], points[:,1])
161
162 This can be done with `griddata` -- below we try out all of the
163 interpolation methods:
164
165 >>> from scipy.interpolate import griddata
166 >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
167 >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
168 >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
169
170 One can see that the exact result is reproduced by all of the
171 methods to some degree, but for this smooth function the piecewise
172 cubic interpolant gives the best results:
173
174 >>> import matplotlib.pyplot as plt
175 >>> plt.subplot(221)
176 >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
177 >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
178 >>> plt.title('Original')
179 >>> plt.subplot(222)
180 >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
181 >>> plt.title('Nearest')
182 >>> plt.subplot(223)
183 >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
184 >>> plt.title('Linear')
185 >>> plt.subplot(224)
186 >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
187 >>> plt.title('Cubic')
188 >>> plt.gcf().set_size_inches(6, 6)
189 >>> plt.show()
190
191 """
192
193 points = _ndim_coords_from_arrays(points)
194
195 if points.ndim < 2:
196 ndim = points.ndim
197 else:
198 ndim = points.shape[-1]
199
200 if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
201 from .interpolate import interp1d
202 points = points.ravel()
203 if isinstance(xi, tuple):
204 if len(xi) != 1:
205 raise ValueError("invalid number of dimensions in xi")
206 xi, = xi
207 # Sort points/values together, necessary as input for interp1d
208 idx = np.argsort(points)
209 points = points[idx]
210 values = values[idx]
211 if method == 'nearest':
212 fill_value = 'extrapolate'
213 ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
214 fill_value=fill_value)
215 return ip(xi)
216 elif method == 'nearest':
217 ip = NearestNDInterpolator(points, values, rescale=rescale)
218 return ip(xi)
219 elif method == 'linear':
220 ip = LinearNDInterpolator(points, values, fill_value=fill_value,
221 rescale=rescale)
222 return ip(xi)
223 elif method == 'cubic' and ndim == 2:
224 ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
225 rescale=rescale)
226 return ip(xi)
227 else:
228 raise ValueError("Unknown interpolation method %r for "
229 "%d dimensional data" % (method, ndim))
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py
--- a/scipy/interpolate/ndgriddata.py
+++ b/scipy/interpolate/ndgriddata.py
@@ -62,7 +62,7 @@
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
- self.values = y
+ self.values = np.asarray(y)
def __call__(self, *args):
"""
| {"golden_diff": "diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py\n--- a/scipy/interpolate/ndgriddata.py\n+++ b/scipy/interpolate/ndgriddata.py\n@@ -62,7 +62,7 @@\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n- self.values = y\n+ self.values = np.asarray(y)\n \n def __call__(self, *args):\n \"\"\"\n", "issue": "BUG: interpolate.NearestNDInterpolator with pandas\ninterpolate.NearestNDInterpolator does not work as expected when used with selected pandas dataframe.\r\nThis is due to the index being maintained when making selections in pandas.\r\n\r\n### Reproducing code example:\r\n```\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy import interpolate\r\n\r\ndf = pd.DataFrame(np.array([[0, 0, 0, 0, 1, 0, 1],\r\n [0, 0, 0, 0, 0, 1, 1],\r\n [0, 0, 0, 0, 1, 1, 2]]).T, columns=['x', 'y', 'z'])\r\ndf_select = df[3:]\r\nNI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), df_select.z)\r\nprint(NI([0.1, 0.9], [0.1, 0.9]))\r\n```\r\nI expect [0, 2] to be output.\r\nBut output is [Nan, 0] as pandas.Series.\r\n\r\nThis is due to the index being maintained when making selections in pandas.\r\nSpecifically, `df_select.z` has index[3, 4, 5, 6].\r\nBut, self.tree.query (xi) line 81, in scipy/interpolate/ndgriddata.py returns a index that assumes that the index starts from zero.\r\nSo, self.tree.query (xi) return [0, 3]\r\nTherefore, self.values[i] line 82, in scipy/interpolate/ndgriddata.py using Invalid index.\r\n\r\n### Note\r\nif case of\r\n```\r\ndf_select = df[3:].reset_index()\r\n```\r\nor\r\n```\r\nNI = interpolate.NearestNDInterpolator((df_select.x, df_select.y), np.array(df_select.z))\r\n```\r\nit works as expected.\r\n\r\nAlso, this bug does not occur in interpolate.LinearNDInterpolator.\r\n\r\n### Scipy/Numpy/Python version information:\r\n```\r\n1.3.0 1.16.4 sys.version_info(major=3, minor=6, micro=8, releaselevel='final', serial=0)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nConvenience interface to N-D interpolation\n\n.. versionadded:: 0.9\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .interpnd import LinearNDInterpolator, NDInterpolatorBase, \\\n CloughTocher2DInterpolator, _ndim_coords_from_arrays\nfrom scipy.spatial import cKDTree\n\n__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',\n 'CloughTocher2DInterpolator']\n\n#------------------------------------------------------------------------------\n# Nearest-neighbour interpolation\n#------------------------------------------------------------------------------\n\n\nclass NearestNDInterpolator(NDInterpolatorBase):\n \"\"\"\n NearestNDInterpolator(x, y)\n\n Nearest-neighbour interpolation in N dimensions.\n\n .. versionadded:: 0.9\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x : (Npoints, Ndims) ndarray of floats\n Data point coordinates.\n y : (Npoints,) ndarray of float or complex\n Data values.\n rescale : boolean, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n tree_options : dict, optional\n Options passed to the underlying ``cKDTree``.\n\n .. 
versionadded:: 0.17.0\n\n\n Notes\n -----\n Uses ``scipy.spatial.cKDTree``\n\n \"\"\"\n\n def __init__(self, x, y, rescale=False, tree_options=None):\n NDInterpolatorBase.__init__(self, x, y, rescale=rescale,\n need_contiguous=False,\n need_values=False)\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n self.values = y\n\n def __call__(self, *args):\n \"\"\"\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n \"\"\"\n xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n xi = self._check_call_shape(xi)\n xi = self._scale_x(xi)\n dist, i = self.tree.query(xi)\n return self.values[i]\n\n\n#------------------------------------------------------------------------------\n# Convenience interface function\n#------------------------------------------------------------------------------\n\ndef griddata(points, values, xi, method='linear', fill_value=np.nan,\n rescale=False):\n \"\"\"\n Interpolate unstructured D-dimensional data.\n\n Parameters\n ----------\n points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).\n Data point coordinates. \n values : ndarray of float or complex, shape (n,)\n Data values.\n xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n\n ``nearest``\n return the value at the data point closest to\n the point of interpolation. See `NearestNDInterpolator` for\n more details.\n\n ``linear``\n tessellate the input point set to n-dimensional\n simplices, and interpolate linearly on each simplex. See\n `LinearNDInterpolator` for more details.\n\n ``cubic`` (1-D)\n return the value determined from a cubic\n spline.\n\n ``cubic`` (2-D)\n return the value determined from a\n piecewise cubic, continuously differentiable (C1), and\n approximately curvature-minimizing polynomial surface. See\n `CloughTocher2DInterpolator` for more details.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then the\n default is ``nan``. This option has no effect for the\n 'nearest' method.\n rescale : bool, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n \n Returns\n -------\n ndarray\n Array of interpolated values.\n\n Notes\n -----\n\n .. versionadded:: 0.9\n\n Examples\n --------\n\n Suppose we want to interpolate the 2-D function\n\n >>> def func(x, y):\n ... 
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2\n\n on a grid in [0, 1]x[0, 1]\n\n >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]\n\n but we only know its values at 1000 data points:\n\n >>> points = np.random.rand(1000, 2)\n >>> values = func(points[:,0], points[:,1])\n\n This can be done with `griddata` -- below we try out all of the\n interpolation methods:\n\n >>> from scipy.interpolate import griddata\n >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')\n >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')\n >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')\n\n One can see that the exact result is reproduced by all of the\n methods to some degree, but for this smooth function the piecewise\n cubic interpolant gives the best results:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(221)\n >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')\n >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)\n >>> plt.title('Original')\n >>> plt.subplot(222)\n >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Nearest')\n >>> plt.subplot(223)\n >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Linear')\n >>> plt.subplot(224)\n >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Cubic')\n >>> plt.gcf().set_size_inches(6, 6)\n >>> plt.show()\n\n \"\"\"\n\n points = _ndim_coords_from_arrays(points)\n\n if points.ndim < 2:\n ndim = points.ndim\n else:\n ndim = points.shape[-1]\n\n if ndim == 1 and method in ('nearest', 'linear', 'cubic'):\n from .interpolate import interp1d\n points = points.ravel()\n if isinstance(xi, tuple):\n if len(xi) != 1:\n raise ValueError(\"invalid number of dimensions in xi\")\n xi, = xi\n # Sort points/values together, necessary as input for interp1d\n idx = np.argsort(points)\n points = points[idx]\n values = values[idx]\n if method == 'nearest':\n fill_value = 'extrapolate'\n ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,\n fill_value=fill_value)\n return ip(xi)\n elif method == 'nearest':\n ip = NearestNDInterpolator(points, values, rescale=rescale)\n return ip(xi)\n elif method == 'linear':\n ip = LinearNDInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n elif method == 'cubic' and ndim == 2:\n ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n else:\n raise ValueError(\"Unknown interpolation method %r for \"\n \"%d dimensional data\" % (method, ndim))\n", "path": "scipy/interpolate/ndgriddata.py"}], "after_files": [{"content": "\"\"\"\nConvenience interface to N-D interpolation\n\n.. versionadded:: 0.9\n\n\"\"\"\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom .interpnd import LinearNDInterpolator, NDInterpolatorBase, \\\n CloughTocher2DInterpolator, _ndim_coords_from_arrays\nfrom scipy.spatial import cKDTree\n\n__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',\n 'CloughTocher2DInterpolator']\n\n#------------------------------------------------------------------------------\n# Nearest-neighbour interpolation\n#------------------------------------------------------------------------------\n\n\nclass NearestNDInterpolator(NDInterpolatorBase):\n \"\"\"\n NearestNDInterpolator(x, y)\n\n Nearest-neighbour interpolation in N dimensions.\n\n .. 
versionadded:: 0.9\n\n Methods\n -------\n __call__\n\n Parameters\n ----------\n x : (Npoints, Ndims) ndarray of floats\n Data point coordinates.\n y : (Npoints,) ndarray of float or complex\n Data values.\n rescale : boolean, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n tree_options : dict, optional\n Options passed to the underlying ``cKDTree``.\n\n .. versionadded:: 0.17.0\n\n\n Notes\n -----\n Uses ``scipy.spatial.cKDTree``\n\n \"\"\"\n\n def __init__(self, x, y, rescale=False, tree_options=None):\n NDInterpolatorBase.__init__(self, x, y, rescale=rescale,\n need_contiguous=False,\n need_values=False)\n if tree_options is None:\n tree_options = dict()\n self.tree = cKDTree(self.points, **tree_options)\n self.values = np.asarray(y)\n\n def __call__(self, *args):\n \"\"\"\n Evaluate interpolator at given points.\n\n Parameters\n ----------\n xi : ndarray of float, shape (..., ndim)\n Points where to interpolate data at.\n\n \"\"\"\n xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])\n xi = self._check_call_shape(xi)\n xi = self._scale_x(xi)\n dist, i = self.tree.query(xi)\n return self.values[i]\n\n\n#------------------------------------------------------------------------------\n# Convenience interface function\n#------------------------------------------------------------------------------\n\ndef griddata(points, values, xi, method='linear', fill_value=np.nan,\n rescale=False):\n \"\"\"\n Interpolate unstructured D-dimensional data.\n\n Parameters\n ----------\n points : 2-D ndarray of floats with shape (n, D), or length D tuple of 1-D ndarrays with shape (n,).\n Data point coordinates. \n values : ndarray of float or complex, shape (n,)\n Data values.\n xi : 2-D ndarray of floats with shape (m, D), or length D tuple of ndarrays broadcastable to the same shape.\n Points at which to interpolate data.\n method : {'linear', 'nearest', 'cubic'}, optional\n Method of interpolation. One of\n\n ``nearest``\n return the value at the data point closest to\n the point of interpolation. See `NearestNDInterpolator` for\n more details.\n\n ``linear``\n tessellate the input point set to n-dimensional\n simplices, and interpolate linearly on each simplex. See\n `LinearNDInterpolator` for more details.\n\n ``cubic`` (1-D)\n return the value determined from a cubic\n spline.\n\n ``cubic`` (2-D)\n return the value determined from a\n piecewise cubic, continuously differentiable (C1), and\n approximately curvature-minimizing polynomial surface. See\n `CloughTocher2DInterpolator` for more details.\n fill_value : float, optional\n Value used to fill in for requested points outside of the\n convex hull of the input points. If not provided, then the\n default is ``nan``. This option has no effect for the\n 'nearest' method.\n rescale : bool, optional\n Rescale points to unit cube before performing interpolation.\n This is useful if some of the input dimensions have\n incommensurable units and differ by many orders of magnitude.\n\n .. versionadded:: 0.14.0\n \n Returns\n -------\n ndarray\n Array of interpolated values.\n\n Notes\n -----\n\n .. versionadded:: 0.9\n\n Examples\n --------\n\n Suppose we want to interpolate the 2-D function\n\n >>> def func(x, y):\n ... 
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2\n\n on a grid in [0, 1]x[0, 1]\n\n >>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]\n\n but we only know its values at 1000 data points:\n\n >>> points = np.random.rand(1000, 2)\n >>> values = func(points[:,0], points[:,1])\n\n This can be done with `griddata` -- below we try out all of the\n interpolation methods:\n\n >>> from scipy.interpolate import griddata\n >>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')\n >>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')\n >>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')\n\n One can see that the exact result is reproduced by all of the\n methods to some degree, but for this smooth function the piecewise\n cubic interpolant gives the best results:\n\n >>> import matplotlib.pyplot as plt\n >>> plt.subplot(221)\n >>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')\n >>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)\n >>> plt.title('Original')\n >>> plt.subplot(222)\n >>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Nearest')\n >>> plt.subplot(223)\n >>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Linear')\n >>> plt.subplot(224)\n >>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')\n >>> plt.title('Cubic')\n >>> plt.gcf().set_size_inches(6, 6)\n >>> plt.show()\n\n \"\"\"\n\n points = _ndim_coords_from_arrays(points)\n\n if points.ndim < 2:\n ndim = points.ndim\n else:\n ndim = points.shape[-1]\n\n if ndim == 1 and method in ('nearest', 'linear', 'cubic'):\n from .interpolate import interp1d\n points = points.ravel()\n if isinstance(xi, tuple):\n if len(xi) != 1:\n raise ValueError(\"invalid number of dimensions in xi\")\n xi, = xi\n # Sort points/values together, necessary as input for interp1d\n idx = np.argsort(points)\n points = points[idx]\n values = values[idx]\n if method == 'nearest':\n fill_value = 'extrapolate'\n ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,\n fill_value=fill_value)\n return ip(xi)\n elif method == 'nearest':\n ip = NearestNDInterpolator(points, values, rescale=rescale)\n return ip(xi)\n elif method == 'linear':\n ip = LinearNDInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n elif method == 'cubic' and ndim == 2:\n ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,\n rescale=rescale)\n return ip(xi)\n else:\n raise ValueError(\"Unknown interpolation method %r for \"\n \"%d dimensional data\" % (method, ndim))\n", "path": "scipy/interpolate/ndgriddata.py"}]} | 3,146 | 120 |
gh_patches_debug_41454 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-6009 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ability to remove individual account IDs from the AMI launch permissions with aws.ami
**Is your feature request related to a problem? Please describe.**
I have a use case where I want Cloud Custodian to find AMIs that are shared with accounts not in a whitelist, but in many cases, the AMI might be shared with both authorised and unauthorised accounts. When I use Cloud Custodian to remediate this (i.e. `remove-launch-permissions`) the permissions for _all_ accounts are removed, including the accounts that do have valid access to the AMI.
**Describe the solution you'd like**
I'd like the remediation available in Cloud Custodian to remove launch permissions to the AMI for accounts that are _not_ defined in the whitelist; all other accounts (i.e. the ones that _are_ in the whitelist) should not have their launch permissions removed.
**Describe alternatives you've considered**
I haven't tried it yet, but a Lambda function may be useful here to remove individual account IDs from the launch permissions. I'm not sure how much I like the idea of maintaining both a Cloud Custodian policy _and_ a Lambda function just to remove individual account IDs from the launch permissions rather than all of them.
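As a rough, hand-rolled sketch of the underlying EC2 calls such a targeted remediation could make (illustrative only — not Cloud Custodian policy syntax; the AMI ID and whitelisted account IDs are placeholders):

```python
import boto3

ec2 = boto3.client("ec2")
whitelist = {"111111111111", "222222222222"}  # hypothetical authorised accounts
image_id = "ami-0123456789abcdef0"            # hypothetical AMI

perms = ec2.describe_image_attribute(
    ImageId=image_id, Attribute="launchPermission")["LaunchPermissions"]
# Only remove permissions for accounts that are not whitelisted.
unauthorised = [p for p in perms if p.get("UserId") and p["UserId"] not in whitelist]
if unauthorised:
    ec2.modify_image_attribute(
        ImageId=image_id,
        LaunchPermission={"Remove": unauthorised},
        OperationType="remove")
```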
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/ami.py`
Content:
```
1 # Copyright 2015-2019 Capital One Services, LLC
2 # Copyright The Cloud Custodian Authors.
3 # SPDX-License-Identifier: Apache-2.0
4 import itertools
5 import logging
6
7 from concurrent.futures import as_completed
8 import jmespath
9
10 from c7n.actions import BaseAction
11 from c7n.exceptions import ClientError
12 from c7n.filters import (
13 AgeFilter, Filter, CrossAccountAccessFilter)
14 from c7n.manager import resources
15 from c7n.query import QueryResourceManager, DescribeSource, TypeInfo
16 from c7n.resolver import ValuesFrom
17 from c7n.utils import local_session, type_schema, chunks, merge_dict_list
18
19
20 log = logging.getLogger('custodian.ami')
21
22
23 class DescribeImageSource(DescribeSource):
24
25 def get_resources(self, ids, cache=True):
26 while ids:
27 try:
28 return super(DescribeImageSource, self).get_resources(ids, cache)
29 except ClientError as e:
30 bad_ami_ids = ErrorHandler.extract_bad_ami(e)
31 if bad_ami_ids:
32 for b in bad_ami_ids:
33 ids.remove(b)
34 continue
35 raise
36 return []
37
38
39 @resources.register('ami')
40 class AMI(QueryResourceManager):
41
42 class resource_type(TypeInfo):
43 service = 'ec2'
44 arn_type = 'image'
45 enum_spec = (
46 'describe_images', 'Images', None)
47 id = 'ImageId'
48 filter_name = 'ImageIds'
49 filter_type = 'list'
50 name = 'Name'
51 date = 'CreationDate'
52
53 source_mapping = {
54 'describe': DescribeImageSource
55 }
56
57 def resources(self, query=None):
58 if query is None and 'query' in self.data:
59 query = merge_dict_list(self.data['query'])
60 elif query is None:
61 query = {}
62 if query.get('Owners') is None:
63 query['Owners'] = ['self']
64 return super(AMI, self).resources(query=query)
65
66
67 class ErrorHandler:
68
69 @staticmethod
70 def extract_bad_ami(e):
71 """Handle various client side errors when describing images"""
72 msg = e.response['Error']['Message']
73 error = e.response['Error']['Code']
74 e_ami_ids = None
75 if error == 'InvalidAMIID.NotFound':
76 e_ami_ids = [
77 e_ami_id.strip() for e_ami_id
78 in msg[msg.find("'[") + 2:msg.rfind("]'")].split(',')]
79 log.warning("Image not found %s" % e_ami_ids)
80 elif error == 'InvalidAMIID.Malformed':
81 e_ami_ids = [msg[msg.find('"') + 1:msg.rfind('"')]]
82 log.warning("Image id malformed %s" % e_ami_ids)
83 return e_ami_ids
84
85
86 @AMI.action_registry.register('deregister')
87 class Deregister(BaseAction):
88 """Action to deregister AMI
89
90 To prevent deregistering all AMI, it is advised to use in conjunction with
91 a filter (such as image-age)
92
93 :example:
94
95 .. code-block:: yaml
96
97 policies:
98 - name: ami-deregister-old
99 resource: ami
100 filters:
101 - type: image-age
102 days: 90
103 actions:
104 - deregister
105 """
106
107 schema = type_schema('deregister', **{'delete-snapshots': {'type': 'boolean'}})
108 permissions = ('ec2:DeregisterImage',)
109 snap_expr = jmespath.compile('BlockDeviceMappings[].Ebs.SnapshotId')
110
111 def process(self, images):
112 client = local_session(self.manager.session_factory).client('ec2')
113 image_count = len(images)
114 images = [i for i in images if self.manager.ctx.options.account_id == i['OwnerId']]
115 if len(images) != image_count:
116 self.log.info("Implicitly filtered %d non owned images", image_count - len(images))
117
118 for i in images:
119 self.manager.retry(client.deregister_image, ImageId=i['ImageId'])
120
121 if not self.data.get('delete-snapshots'):
122 continue
123 snap_ids = self.snap_expr.search(i) or ()
124 for s in snap_ids:
125 try:
126 self.manager.retry(client.delete_snapshot, SnapshotId=s)
127 except ClientError as e:
128 if e.error['Code'] == 'InvalidSnapshot.InUse':
129 continue
130
131
132 @AMI.action_registry.register('remove-launch-permissions')
133 class RemoveLaunchPermissions(BaseAction):
134 """Action to remove the ability to launch an instance from an AMI
135
136 This action will remove any launch permissions granted to other
137 AWS accounts from the image, leaving only the owner capable of
138 launching it
139
140 :example:
141
142 .. code-block:: yaml
143
144 policies:
145 - name: ami-stop-share-old
146 resource: ami
147 filters:
148 - type: image-age
149 days: 60
150 actions:
151 - remove-launch-permissions
152
153 """
154
155 schema = type_schema('remove-launch-permissions')
156 permissions = ('ec2:ResetImageAttribute',)
157
158 def process(self, images):
159 client = local_session(self.manager.session_factory).client('ec2')
160 for i in images:
161 self.process_image(client, i)
162
163 def process_image(self, client, image):
164 client.reset_image_attribute(
165 ImageId=image['ImageId'], Attribute="launchPermission")
166
167
168 @AMI.action_registry.register('copy')
169 class Copy(BaseAction):
170 """Action to copy AMIs with optional encryption
171
172 This action can copy AMIs while optionally encrypting or decrypting
173 the target AMI. It is advised to use in conjunction with a filter.
174
175 Note there is a max in flight of 5 per account/region.
176
177 :example:
178
179 .. code-block:: yaml
180
181 policies:
182 - name: ami-ensure-encrypted
183 resource: ami
184 filters:
185 - type: value
186 key: encrypted
187 value: true
188 actions:
189 - type: copy
190 encrypt: true
191 key-id: 00000000-0000-0000-0000-000000000000
192 """
193
194 permissions = ('ec2:CopyImage',)
195 schema = {
196 'type': 'object',
197 'additionalProperties': False,
198 'properties': {
199 'type': {'enum': ['copy']},
200 'name': {'type': 'string'},
201 'description': {'type': 'string'},
202 'region': {'type': 'string'},
203 'encrypt': {'type': 'boolean'},
204 'key-id': {'type': 'string'}
205 }
206 }
207
208 def process(self, images):
209 session = local_session(self.manager.session_factory)
210 client = session.client(
211 'ec2',
212 region_name=self.data.get('region', None))
213
214 for image in images:
215 client.copy_image(
216 Name=self.data.get('name', image['Name']),
217 Description=self.data.get('description', image['Description']),
218 SourceRegion=session.region_name,
219 SourceImageId=image['ImageId'],
220 Encrypted=self.data.get('encrypt', False),
221 KmsKeyId=self.data.get('key-id', ''))
222
223
224 @AMI.filter_registry.register('image-age')
225 class ImageAgeFilter(AgeFilter):
226 """Filters images based on the age (in days)
227
228 :example:
229
230 .. code-block:: yaml
231
232 policies:
233 - name: ami-remove-launch-permissions
234 resource: ami
235 filters:
236 - type: image-age
237 days: 30
238 """
239
240 date_attribute = "CreationDate"
241 schema = type_schema(
242 'image-age',
243 op={'$ref': '#/definitions/filters_common/comparison_operators'},
244 days={'type': 'number', 'minimum': 0})
245
246
247 @AMI.filter_registry.register('unused')
248 class ImageUnusedFilter(Filter):
249 """Filters images based on usage
250
251 true: image has no instances spawned from it
252 false: image has instances spawned from it
253
254 :example:
255
256 .. code-block:: yaml
257
258 policies:
259 - name: ami-unused
260 resource: ami
261 filters:
262 - type: unused
263 value: true
264 """
265
266 schema = type_schema('unused', value={'type': 'boolean'})
267
268 def get_permissions(self):
269 return list(itertools.chain(*[
270 self.manager.get_resource_manager(m).get_permissions()
271 for m in ('asg', 'launch-config', 'ec2')]))
272
273 def _pull_asg_images(self):
274 asgs = self.manager.get_resource_manager('asg').resources()
275 image_ids = set()
276 lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)
277 lcfg_mgr = self.manager.get_resource_manager('launch-config')
278
279 if lcfgs:
280 image_ids.update([
281 lcfg['ImageId'] for lcfg in lcfg_mgr.resources()
282 if lcfg['LaunchConfigurationName'] in lcfgs])
283
284 tmpl_mgr = self.manager.get_resource_manager('launch-template-version')
285 for tversion in tmpl_mgr.get_resources(
286 list(tmpl_mgr.get_asg_templates(asgs).keys())):
287 image_ids.add(tversion['LaunchTemplateData'].get('ImageId'))
288 return image_ids
289
290 def _pull_ec2_images(self):
291 ec2_manager = self.manager.get_resource_manager('ec2')
292 return {i['ImageId'] for i in ec2_manager.resources()}
293
294 def process(self, resources, event=None):
295 images = self._pull_ec2_images().union(self._pull_asg_images())
296 if self.data.get('value', True):
297 return [r for r in resources if r['ImageId'] not in images]
298 return [r for r in resources if r['ImageId'] in images]
299
300
301 @AMI.filter_registry.register('cross-account')
302 class AmiCrossAccountFilter(CrossAccountAccessFilter):
303
304 schema = type_schema(
305 'cross-account',
306 # white list accounts
307 whitelist_from=ValuesFrom.schema,
308 whitelist={'type': 'array', 'items': {'type': 'string'}})
309
310 permissions = ('ec2:DescribeImageAttribute',)
311
312 def process_resource_set(self, client, accounts, resource_set):
313 results = []
314 for r in resource_set:
315 attrs = self.manager.retry(
316 client.describe_image_attribute,
317 ImageId=r['ImageId'],
318 Attribute='launchPermission')['LaunchPermissions']
319 image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}
320 delta_accounts = image_accounts.difference(accounts)
321 if delta_accounts:
322 r['c7n:CrossAccountViolations'] = list(delta_accounts)
323 results.append(r)
324 return results
325
326 def process(self, resources, event=None):
327 results = []
328 client = local_session(self.manager.session_factory).client('ec2')
329 accounts = self.get_accounts()
330
331 with self.executor_factory(max_workers=2) as w:
332 futures = []
333 for resource_set in chunks(resources, 20):
334 futures.append(
335 w.submit(
336 self.process_resource_set, client, accounts, resource_set))
337 for f in as_completed(futures):
338 if f.exception():
339 self.log.error(
340 "Exception checking cross account access \n %s" % (
341 f.exception()))
342 continue
343 results.extend(f.result())
344 return results
345
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/ami.py b/c7n/resources/ami.py
--- a/c7n/resources/ami.py
+++ b/c7n/resources/ami.py
@@ -18,7 +18,7 @@
import jmespath
from c7n.actions import BaseAction
-from c7n.exceptions import ClientError
+from c7n.exceptions import ClientError, PolicyValidationError
from c7n.filters import (
AgeFilter, Filter, CrossAccountAccessFilter)
from c7n.manager import resources
@@ -162,8 +162,25 @@
"""
- schema = type_schema('remove-launch-permissions')
- permissions = ('ec2:ResetImageAttribute',)
+ schema = type_schema(
+ 'remove-launch-permissions',
+ accounts={'oneOf': [
+ {'enum': ['matched']},
+ {'type': 'string', 'minLength': 12, 'maxLength': 12}]})
+
+ permissions = ('ec2:ResetImageAttribute', 'ec2:ModifyImageAttribute',)
+
+ def validate(self):
+ if 'accounts' in self.data and self.data['accounts'] == 'matched':
+ found = False
+ for f in self.manager.iter_filters():
+ if isinstance(f, AmiCrossAccountFilter):
+ found = True
+ break
+ if not found:
+ raise PolicyValidationError(
+ "policy:%s filter:%s with matched requires cross-account filter" % (
+ self.manager.ctx.policy.name, self.type))
def process(self, images):
client = local_session(self.manager.session_factory).client('ec2')
@@ -171,8 +188,25 @@
self.process_image(client, i)
def process_image(self, client, image):
- client.reset_image_attribute(
- ImageId=image['ImageId'], Attribute="launchPermission")
+ accounts = self.data.get('accounts')
+ if not accounts:
+ return client.reset_image_attribute(
+ ImageId=image['ImageId'], Attribute="launchPermission")
+ if accounts == 'matched':
+ accounts = image.get(AmiCrossAccountFilter.annotation_key)
+ if not accounts:
+ return
+ remove = []
+ if 'all' in accounts:
+ remove.append({'Group': 'all'})
+ accounts.remove('all')
+ remove.extend([{'UserId': a} for a in accounts])
+ if not remove:
+ return
+ client.modify_image_attribute(
+ ImageId=image['ImageId'],
+ LaunchPermission={'Remove': remove},
+ OperationType='remove')
@AMI.action_registry.register('copy')
@@ -318,6 +352,7 @@
whitelist={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DescribeImageAttribute',)
+ annotation_key = 'c7n:CrossAccountViolations'
def process_resource_set(self, client, accounts, resource_set):
results = []
@@ -326,10 +361,11 @@
client.describe_image_attribute,
ImageId=r['ImageId'],
Attribute='launchPermission')['LaunchPermissions']
+ r['c7n:LaunchPermissions'] = attrs
image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}
delta_accounts = image_accounts.difference(accounts)
if delta_accounts:
- r['c7n:CrossAccountViolations'] = list(delta_accounts)
+ r[self.annotation_key] = list(delta_accounts)
results.append(r)
return results
| {"golden_diff": "diff --git a/c7n/resources/ami.py b/c7n/resources/ami.py\n--- a/c7n/resources/ami.py\n+++ b/c7n/resources/ami.py\n@@ -18,7 +18,7 @@\n import jmespath\n \n from c7n.actions import BaseAction\n-from c7n.exceptions import ClientError\n+from c7n.exceptions import ClientError, PolicyValidationError\n from c7n.filters import (\n AgeFilter, Filter, CrossAccountAccessFilter)\n from c7n.manager import resources\n@@ -162,8 +162,25 @@\n \n \"\"\"\n \n- schema = type_schema('remove-launch-permissions')\n- permissions = ('ec2:ResetImageAttribute',)\n+ schema = type_schema(\n+ 'remove-launch-permissions',\n+ accounts={'oneOf': [\n+ {'enum': ['matched']},\n+ {'type': 'string', 'minLength': 12, 'maxLength': 12}]})\n+\n+ permissions = ('ec2:ResetImageAttribute', 'ec2:ModifyImageAttribute',)\n+\n+ def validate(self):\n+ if 'accounts' in self.data and self.data['accounts'] == 'matched':\n+ found = False\n+ for f in self.manager.iter_filters():\n+ if isinstance(f, AmiCrossAccountFilter):\n+ found = True\n+ break\n+ if not found:\n+ raise PolicyValidationError(\n+ \"policy:%s filter:%s with matched requires cross-account filter\" % (\n+ self.manager.ctx.policy.name, self.type))\n \n def process(self, images):\n client = local_session(self.manager.session_factory).client('ec2')\n@@ -171,8 +188,25 @@\n self.process_image(client, i)\n \n def process_image(self, client, image):\n- client.reset_image_attribute(\n- ImageId=image['ImageId'], Attribute=\"launchPermission\")\n+ accounts = self.data.get('accounts')\n+ if not accounts:\n+ return client.reset_image_attribute(\n+ ImageId=image['ImageId'], Attribute=\"launchPermission\")\n+ if accounts == 'matched':\n+ accounts = image.get(AmiCrossAccountFilter.annotation_key)\n+ if not accounts:\n+ return\n+ remove = []\n+ if 'all' in accounts:\n+ remove.append({'Group': 'all'})\n+ accounts.remove('all')\n+ remove.extend([{'UserId': a} for a in accounts])\n+ if not remove:\n+ return\n+ client.modify_image_attribute(\n+ ImageId=image['ImageId'],\n+ LaunchPermission={'Remove': remove},\n+ OperationType='remove')\n \n \n @AMI.action_registry.register('copy')\n@@ -318,6 +352,7 @@\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n \n permissions = ('ec2:DescribeImageAttribute',)\n+ annotation_key = 'c7n:CrossAccountViolations'\n \n def process_resource_set(self, client, accounts, resource_set):\n results = []\n@@ -326,10 +361,11 @@\n client.describe_image_attribute,\n ImageId=r['ImageId'],\n Attribute='launchPermission')['LaunchPermissions']\n+ r['c7n:LaunchPermissions'] = attrs\n image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}\n delta_accounts = image_accounts.difference(accounts)\n if delta_accounts:\n- r['c7n:CrossAccountViolations'] = list(delta_accounts)\n+ r[self.annotation_key] = list(delta_accounts)\n results.append(r)\n return results\n", "issue": "Ability to remove individual account IDs from the AMI launch permissions with aws.ami\n**Is your feature request related to a problem? Please describe.**\r\nI have a use case where I want Cloud Custodian to find AMIs that are shared with accounts not in a whitelist, but in many cases, the AMI might be shared with both authorised and unauthorised accounts. When I use Cloud Custodian to remediate this (i.e. 
`remove-launch-permissions`) the permissions for _all_ accounts are removed, including the accounts that do have valid access to the AMI.\r\n\r\n**Describe the solution you'd like**\r\nI'd like the remediation available in Cloud Custodian to remove launch permissions to the AMI for accounts that are _not_ defined in the whitelist; all other accounts (i.e. the ones that _are_ in the whitelist) should not have their launch permissions removed.\r\n\r\n**Describe alternatives you've considered**\r\nI haven't tried it yet, but a Lambda function may be useful here to remove individual account IDs from the launch permissions. I'm not sure how much I like the idea of maintaining both a Cloud Custodian policy _and_ a Lambda function just to remove individual account IDs from the launch permissions rather than all of them.\n", "before_files": [{"content": "# Copyright 2015-2019 Capital One Services, LLC\n# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nimport itertools\nimport logging\n\nfrom concurrent.futures import as_completed\nimport jmespath\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import ClientError\nfrom c7n.filters import (\n AgeFilter, Filter, CrossAccountAccessFilter)\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, DescribeSource, TypeInfo\nfrom c7n.resolver import ValuesFrom\nfrom c7n.utils import local_session, type_schema, chunks, merge_dict_list\n\n\nlog = logging.getLogger('custodian.ami')\n\n\nclass DescribeImageSource(DescribeSource):\n\n def get_resources(self, ids, cache=True):\n while ids:\n try:\n return super(DescribeImageSource, self).get_resources(ids, cache)\n except ClientError as e:\n bad_ami_ids = ErrorHandler.extract_bad_ami(e)\n if bad_ami_ids:\n for b in bad_ami_ids:\n ids.remove(b)\n continue\n raise\n return []\n\n\[email protected]('ami')\nclass AMI(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'ec2'\n arn_type = 'image'\n enum_spec = (\n 'describe_images', 'Images', None)\n id = 'ImageId'\n filter_name = 'ImageIds'\n filter_type = 'list'\n name = 'Name'\n date = 'CreationDate'\n\n source_mapping = {\n 'describe': DescribeImageSource\n }\n\n def resources(self, query=None):\n if query is None and 'query' in self.data:\n query = merge_dict_list(self.data['query'])\n elif query is None:\n query = {}\n if query.get('Owners') is None:\n query['Owners'] = ['self']\n return super(AMI, self).resources(query=query)\n\n\nclass ErrorHandler:\n\n @staticmethod\n def extract_bad_ami(e):\n \"\"\"Handle various client side errors when describing images\"\"\"\n msg = e.response['Error']['Message']\n error = e.response['Error']['Code']\n e_ami_ids = None\n if error == 'InvalidAMIID.NotFound':\n e_ami_ids = [\n e_ami_id.strip() for e_ami_id\n in msg[msg.find(\"'[\") + 2:msg.rfind(\"]'\")].split(',')]\n log.warning(\"Image not found %s\" % e_ami_ids)\n elif error == 'InvalidAMIID.Malformed':\n e_ami_ids = [msg[msg.find('\"') + 1:msg.rfind('\"')]]\n log.warning(\"Image id malformed %s\" % e_ami_ids)\n return e_ami_ids\n\n\[email protected]_registry.register('deregister')\nclass Deregister(BaseAction):\n \"\"\"Action to deregister AMI\n\n To prevent deregistering all AMI, it is advised to use in conjunction with\n a filter (such as image-age)\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: ami-deregister-old\n resource: ami\n filters:\n - type: image-age\n days: 90\n actions:\n - deregister\n \"\"\"\n\n schema = type_schema('deregister', **{'delete-snapshots': {'type': 'boolean'}})\n permissions = ('ec2:DeregisterImage',)\n snap_expr = jmespath.compile('BlockDeviceMappings[].Ebs.SnapshotId')\n\n def process(self, images):\n client = local_session(self.manager.session_factory).client('ec2')\n image_count = len(images)\n images = [i for i in images if self.manager.ctx.options.account_id == i['OwnerId']]\n if len(images) != image_count:\n self.log.info(\"Implicitly filtered %d non owned images\", image_count - len(images))\n\n for i in images:\n self.manager.retry(client.deregister_image, ImageId=i['ImageId'])\n\n if not self.data.get('delete-snapshots'):\n continue\n snap_ids = self.snap_expr.search(i) or ()\n for s in snap_ids:\n try:\n self.manager.retry(client.delete_snapshot, SnapshotId=s)\n except ClientError as e:\n if e.error['Code'] == 'InvalidSnapshot.InUse':\n continue\n\n\[email protected]_registry.register('remove-launch-permissions')\nclass RemoveLaunchPermissions(BaseAction):\n \"\"\"Action to remove the ability to launch an instance from an AMI\n\n This action will remove any launch permissions granted to other\n AWS accounts from the image, leaving only the owner capable of\n launching it\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-stop-share-old\n resource: ami\n filters:\n - type: image-age\n days: 60\n actions:\n - remove-launch-permissions\n\n \"\"\"\n\n schema = type_schema('remove-launch-permissions')\n permissions = ('ec2:ResetImageAttribute',)\n\n def process(self, images):\n client = local_session(self.manager.session_factory).client('ec2')\n for i in images:\n self.process_image(client, i)\n\n def process_image(self, client, image):\n client.reset_image_attribute(\n ImageId=image['ImageId'], Attribute=\"launchPermission\")\n\n\[email protected]_registry.register('copy')\nclass Copy(BaseAction):\n \"\"\"Action to copy AMIs with optional encryption\n\n This action can copy AMIs while optionally encrypting or decrypting\n the target AMI. It is advised to use in conjunction with a filter.\n\n Note there is a max in flight of 5 per account/region.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-ensure-encrypted\n resource: ami\n filters:\n - type: value\n key: encrypted\n value: true\n actions:\n - type: copy\n encrypt: true\n key-id: 00000000-0000-0000-0000-000000000000\n \"\"\"\n\n permissions = ('ec2:CopyImage',)\n schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'type': {'enum': ['copy']},\n 'name': {'type': 'string'},\n 'description': {'type': 'string'},\n 'region': {'type': 'string'},\n 'encrypt': {'type': 'boolean'},\n 'key-id': {'type': 'string'}\n }\n }\n\n def process(self, images):\n session = local_session(self.manager.session_factory)\n client = session.client(\n 'ec2',\n region_name=self.data.get('region', None))\n\n for image in images:\n client.copy_image(\n Name=self.data.get('name', image['Name']),\n Description=self.data.get('description', image['Description']),\n SourceRegion=session.region_name,\n SourceImageId=image['ImageId'],\n Encrypted=self.data.get('encrypt', False),\n KmsKeyId=self.data.get('key-id', ''))\n\n\[email protected]_registry.register('image-age')\nclass ImageAgeFilter(AgeFilter):\n \"\"\"Filters images based on the age (in days)\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: ami-remove-launch-permissions\n resource: ami\n filters:\n - type: image-age\n days: 30\n \"\"\"\n\n date_attribute = \"CreationDate\"\n schema = type_schema(\n 'image-age',\n op={'$ref': '#/definitions/filters_common/comparison_operators'},\n days={'type': 'number', 'minimum': 0})\n\n\[email protected]_registry.register('unused')\nclass ImageUnusedFilter(Filter):\n \"\"\"Filters images based on usage\n\n true: image has no instances spawned from it\n false: image has instances spawned from it\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-unused\n resource: ami\n filters:\n - type: unused\n value: true\n \"\"\"\n\n schema = type_schema('unused', value={'type': 'boolean'})\n\n def get_permissions(self):\n return list(itertools.chain(*[\n self.manager.get_resource_manager(m).get_permissions()\n for m in ('asg', 'launch-config', 'ec2')]))\n\n def _pull_asg_images(self):\n asgs = self.manager.get_resource_manager('asg').resources()\n image_ids = set()\n lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)\n lcfg_mgr = self.manager.get_resource_manager('launch-config')\n\n if lcfgs:\n image_ids.update([\n lcfg['ImageId'] for lcfg in lcfg_mgr.resources()\n if lcfg['LaunchConfigurationName'] in lcfgs])\n\n tmpl_mgr = self.manager.get_resource_manager('launch-template-version')\n for tversion in tmpl_mgr.get_resources(\n list(tmpl_mgr.get_asg_templates(asgs).keys())):\n image_ids.add(tversion['LaunchTemplateData'].get('ImageId'))\n return image_ids\n\n def _pull_ec2_images(self):\n ec2_manager = self.manager.get_resource_manager('ec2')\n return {i['ImageId'] for i in ec2_manager.resources()}\n\n def process(self, resources, event=None):\n images = self._pull_ec2_images().union(self._pull_asg_images())\n if self.data.get('value', True):\n return [r for r in resources if r['ImageId'] not in images]\n return [r for r in resources if r['ImageId'] in images]\n\n\[email protected]_registry.register('cross-account')\nclass AmiCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('ec2:DescribeImageAttribute',)\n\n def process_resource_set(self, client, accounts, resource_set):\n results = []\n for r in resource_set:\n attrs = self.manager.retry(\n client.describe_image_attribute,\n ImageId=r['ImageId'],\n Attribute='launchPermission')['LaunchPermissions']\n image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}\n delta_accounts = image_accounts.difference(accounts)\n if delta_accounts:\n r['c7n:CrossAccountViolations'] = list(delta_accounts)\n results.append(r)\n return results\n\n def process(self, resources, event=None):\n results = []\n client = local_session(self.manager.session_factory).client('ec2')\n accounts = self.get_accounts()\n\n with self.executor_factory(max_workers=2) as w:\n futures = []\n for resource_set in chunks(resources, 20):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, resource_set))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Exception checking cross account access \\n %s\" % (\n f.exception()))\n continue\n results.extend(f.result())\n return results\n", "path": "c7n/resources/ami.py"}], "after_files": [{"content": "# Copyright 2015-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nimport logging\n\nfrom concurrent.futures import as_completed\nimport jmespath\n\nfrom c7n.actions import BaseAction\nfrom c7n.exceptions import ClientError, PolicyValidationError\nfrom c7n.filters import (\n AgeFilter, Filter, CrossAccountAccessFilter)\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, DescribeSource, TypeInfo\nfrom c7n.resolver import ValuesFrom\nfrom c7n.utils import local_session, type_schema, chunks, merge_dict_list\n\n\nlog = logging.getLogger('custodian.ami')\n\n\nclass DescribeImageSource(DescribeSource):\n\n def get_resources(self, ids, cache=True):\n while ids:\n try:\n return super(DescribeImageSource, self).get_resources(ids, cache)\n except ClientError as e:\n bad_ami_ids = ErrorHandler.extract_bad_ami(e)\n if bad_ami_ids:\n for b in bad_ami_ids:\n ids.remove(b)\n continue\n raise\n return []\n\n\[email protected]('ami')\nclass AMI(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'ec2'\n arn_type = 'image'\n enum_spec = (\n 'describe_images', 'Images', None)\n id = 'ImageId'\n filter_name = 'ImageIds'\n filter_type = 'list'\n name = 'Name'\n date = 'CreationDate'\n\n source_mapping = {\n 'describe': DescribeImageSource\n }\n\n def resources(self, query=None):\n if query is None and 'query' in self.data:\n query = merge_dict_list(self.data['query'])\n elif query is None:\n query = {}\n if query.get('Owners') is None:\n query['Owners'] = ['self']\n return super(AMI, self).resources(query=query)\n\n\nclass ErrorHandler:\n\n @staticmethod\n def extract_bad_ami(e):\n \"\"\"Handle various client side errors when describing images\"\"\"\n msg = e.response['Error']['Message']\n error = e.response['Error']['Code']\n e_ami_ids = None\n if error == 'InvalidAMIID.NotFound':\n e_ami_ids = [\n e_ami_id.strip() for e_ami_id\n in msg[msg.find(\"'[\") + 2:msg.rfind(\"]'\")].split(',')]\n log.warning(\"Image not found %s\" % e_ami_ids)\n elif error == 'InvalidAMIID.Malformed':\n e_ami_ids = [msg[msg.find('\"') + 1:msg.rfind('\"')]]\n log.warning(\"Image id malformed %s\" % e_ami_ids)\n return e_ami_ids\n\n\[email protected]_registry.register('deregister')\nclass Deregister(BaseAction):\n \"\"\"Action to deregister AMI\n\n To prevent deregistering all AMI, it is advised to use in conjunction with\n a filter (such as image-age)\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: ami-deregister-old\n resource: ami\n filters:\n - type: image-age\n days: 90\n actions:\n - deregister\n \"\"\"\n\n schema = type_schema('deregister', **{'delete-snapshots': {'type': 'boolean'}})\n permissions = ('ec2:DeregisterImage',)\n snap_expr = jmespath.compile('BlockDeviceMappings[].Ebs.SnapshotId')\n\n def process(self, images):\n client = local_session(self.manager.session_factory).client('ec2')\n image_count = len(images)\n images = [i for i in images if self.manager.ctx.options.account_id == i['OwnerId']]\n if len(images) != image_count:\n self.log.info(\"Implicitly filtered %d non owned images\", image_count - len(images))\n\n for i in images:\n self.manager.retry(client.deregister_image, ImageId=i['ImageId'])\n\n if not self.data.get('delete-snapshots'):\n continue\n snap_ids = self.snap_expr.search(i) or ()\n for s in snap_ids:\n try:\n self.manager.retry(client.delete_snapshot, SnapshotId=s)\n except ClientError as e:\n if e.error['Code'] == 'InvalidSnapshot.InUse':\n continue\n\n\[email protected]_registry.register('remove-launch-permissions')\nclass RemoveLaunchPermissions(BaseAction):\n \"\"\"Action to remove the ability to launch an instance from an AMI\n\n This action will remove any launch permissions granted to other\n AWS accounts from the image, leaving only the owner capable of\n launching it\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-stop-share-old\n resource: ami\n filters:\n - type: image-age\n days: 60\n actions:\n - remove-launch-permissions\n\n \"\"\"\n\n schema = type_schema(\n 'remove-launch-permissions',\n accounts={'oneOf': [\n {'enum': ['matched']},\n {'type': 'string', 'minLength': 12, 'maxLength': 12}]})\n\n permissions = ('ec2:ResetImageAttribute', 'ec2:ModifyImageAttribute',)\n\n def validate(self):\n if 'accounts' in self.data and self.data['accounts'] == 'matched':\n found = False\n for f in self.manager.iter_filters():\n if isinstance(f, AmiCrossAccountFilter):\n found = True\n break\n if not found:\n raise PolicyValidationError(\n \"policy:%s filter:%s with matched requires cross-account filter\" % (\n self.manager.ctx.policy.name, self.type))\n\n def process(self, images):\n client = local_session(self.manager.session_factory).client('ec2')\n for i in images:\n self.process_image(client, i)\n\n def process_image(self, client, image):\n accounts = self.data.get('accounts')\n if not accounts:\n return client.reset_image_attribute(\n ImageId=image['ImageId'], Attribute=\"launchPermission\")\n if accounts == 'matched':\n accounts = image.get(AmiCrossAccountFilter.annotation_key)\n if not accounts:\n return\n remove = []\n if 'all' in accounts:\n remove.append({'Group': 'all'})\n accounts.remove('all')\n remove.extend([{'UserId': a} for a in accounts])\n if not remove:\n return\n client.modify_image_attribute(\n ImageId=image['ImageId'],\n LaunchPermission={'Remove': remove},\n OperationType='remove')\n\n\[email protected]_registry.register('copy')\nclass Copy(BaseAction):\n \"\"\"Action to copy AMIs with optional encryption\n\n This action can copy AMIs while optionally encrypting or decrypting\n the target AMI. It is advised to use in conjunction with a filter.\n\n Note there is a max in flight of 5 per account/region.\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: ami-ensure-encrypted\n resource: ami\n filters:\n - type: value\n key: encrypted\n value: true\n actions:\n - type: copy\n encrypt: true\n key-id: 00000000-0000-0000-0000-000000000000\n \"\"\"\n\n permissions = ('ec2:CopyImage',)\n schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'type': {'enum': ['copy']},\n 'name': {'type': 'string'},\n 'description': {'type': 'string'},\n 'region': {'type': 'string'},\n 'encrypt': {'type': 'boolean'},\n 'key-id': {'type': 'string'}\n }\n }\n\n def process(self, images):\n session = local_session(self.manager.session_factory)\n client = session.client(\n 'ec2',\n region_name=self.data.get('region', None))\n\n for image in images:\n client.copy_image(\n Name=self.data.get('name', image['Name']),\n Description=self.data.get('description', image['Description']),\n SourceRegion=session.region_name,\n SourceImageId=image['ImageId'],\n Encrypted=self.data.get('encrypt', False),\n KmsKeyId=self.data.get('key-id', ''))\n\n\[email protected]_registry.register('image-age')\nclass ImageAgeFilter(AgeFilter):\n \"\"\"Filters images based on the age (in days)\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-remove-launch-permissions\n resource: ami\n filters:\n - type: image-age\n days: 30\n \"\"\"\n\n date_attribute = \"CreationDate\"\n schema = type_schema(\n 'image-age',\n op={'$ref': '#/definitions/filters_common/comparison_operators'},\n days={'type': 'number', 'minimum': 0})\n\n\[email protected]_registry.register('unused')\nclass ImageUnusedFilter(Filter):\n \"\"\"Filters images based on usage\n\n true: image has no instances spawned from it\n false: image has instances spawned from it\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: ami-unused\n resource: ami\n filters:\n - type: unused\n value: true\n \"\"\"\n\n schema = type_schema('unused', value={'type': 'boolean'})\n\n def get_permissions(self):\n return list(itertools.chain(*[\n self.manager.get_resource_manager(m).get_permissions()\n for m in ('asg', 'launch-config', 'ec2')]))\n\n def _pull_asg_images(self):\n asgs = self.manager.get_resource_manager('asg').resources()\n image_ids = set()\n lcfgs = set(a['LaunchConfigurationName'] for a in asgs if 'LaunchConfigurationName' in a)\n lcfg_mgr = self.manager.get_resource_manager('launch-config')\n\n if lcfgs:\n image_ids.update([\n lcfg['ImageId'] for lcfg in lcfg_mgr.resources()\n if lcfg['LaunchConfigurationName'] in lcfgs])\n\n tmpl_mgr = self.manager.get_resource_manager('launch-template-version')\n for tversion in tmpl_mgr.get_resources(\n list(tmpl_mgr.get_asg_templates(asgs).keys())):\n image_ids.add(tversion['LaunchTemplateData'].get('ImageId'))\n return image_ids\n\n def _pull_ec2_images(self):\n ec2_manager = self.manager.get_resource_manager('ec2')\n return {i['ImageId'] for i in ec2_manager.resources()}\n\n def process(self, resources, event=None):\n images = self._pull_ec2_images().union(self._pull_asg_images())\n if self.data.get('value', True):\n return [r for r in resources if r['ImageId'] not in images]\n return [r for r in resources if r['ImageId'] in images]\n\n\[email protected]_registry.register('cross-account')\nclass AmiCrossAccountFilter(CrossAccountAccessFilter):\n\n schema = type_schema(\n 'cross-account',\n # white list accounts\n whitelist_from=ValuesFrom.schema,\n whitelist={'type': 'array', 'items': {'type': 'string'}})\n\n permissions = ('ec2:DescribeImageAttribute',)\n annotation_key = 'c7n:CrossAccountViolations'\n\n 
def process_resource_set(self, client, accounts, resource_set):\n results = []\n for r in resource_set:\n attrs = self.manager.retry(\n client.describe_image_attribute,\n ImageId=r['ImageId'],\n Attribute='launchPermission')['LaunchPermissions']\n r['c7n:LaunchPermissions'] = attrs\n image_accounts = {a.get('Group') or a.get('UserId') for a in attrs}\n delta_accounts = image_accounts.difference(accounts)\n if delta_accounts:\n r[self.annotation_key] = list(delta_accounts)\n results.append(r)\n return results\n\n def process(self, resources, event=None):\n results = []\n client = local_session(self.manager.session_factory).client('ec2')\n accounts = self.get_accounts()\n\n with self.executor_factory(max_workers=2) as w:\n futures = []\n for resource_set in chunks(resources, 20):\n futures.append(\n w.submit(\n self.process_resource_set, client, accounts, resource_set))\n for f in as_completed(futures):\n if f.exception():\n self.log.error(\n \"Exception checking cross account access \\n %s\" % (\n f.exception()))\n continue\n results.extend(f.result())\n return results\n", "path": "c7n/resources/ami.py"}]} | 3,926 | 789 |
gh_patches_debug_60593 | rasdani/github-patches | git_diff | pytorch__TensorRT-196 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛 [Bug] UnicodeDecodeError running setup.py
## Bug Description
Running "python setup.py install" fails with a UnicodeDecodeError when reading README.md.
## To Reproduce
Steps to reproduce the behavior:
1. docker run --gpus=all -it nvcr.io/nvidia/tensorrt:20.03-py3 /bin/bash
2. (cd /usr/bin && wget -O bazel https://github.com/bazelbuild/bazelisk/releases/download/v1.7.3/bazelisk-linux-amd64 && chmod +x bazel)
3. git clone https://github.com/NVIDIA/TRTorch.git
4. cd TRTorch/py
5. pip install -r requirements.txt
6. python setup.py install
The error follows:
> root@320583666d0c:/workspace/TRTorch/py# python setup.py install
> Traceback (most recent call last):
> File "setup.py", line 194, in <module>
> long_description = fh.read()
> File "/usr/lib/python3.6/encodings/ascii.py", line 26, in decode
> return codecs.ascii_decode(input, self.errors)[0]
> UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 7349: ordinal not in range(128)
## Expected behavior
No UnicodeDecodeError; README.md should be read as UTF-8 and installation should proceed.
## Environment
- PyTorch Version (e.g., 1.0): 1.6.0
- CPU Architecture: x86
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, `libtorch`, source): pip
- Build command you used (if compiling from source): python setup.py install
- Are you using local sources or building from archives: local sources (git clone)
- Python version: 3.6.9
- CUDA version: 10.2.89
- GPU models and configuration: gtx 970
## Additional context
The following appears to resolve the issue:
```
diff --git a/py/setup.py b/py/setup.py
index 53f85da..8344c0a 100644
--- a/py/setup.py
+++ b/py/setup.py
@@ -190,7 +190,7 @@ ext_modules = [
)
]
-with open("README.md", "r") as fh:
+with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
```
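
For context, a minimal sketch of the failure mode the change above addresses (illustrative only; it assumes README.md contains non-ASCII UTF-8 characters and that the interpreter's locale default encoding is ASCII, as in the container used here):

```python
# open() without an explicit encoding falls back to the locale's preferred
# encoding (ASCII in this container), so reading a UTF-8 README fails.
with open("README.md", "r") as fh:
    long_description = fh.read()   # may raise UnicodeDecodeError

# Passing the encoding explicitly makes the read independent of the locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
```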
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/setup.py`
Content:
```
1 import os
2 import sys
3 import glob
4 import setuptools
5 from setuptools import setup, Extension, find_packages
6 from setuptools.command.build_ext import build_ext
7 from setuptools.command.develop import develop
8 from setuptools.command.install import install
9 from distutils.cmd import Command
10 from wheel.bdist_wheel import bdist_wheel
11
12 from torch.utils import cpp_extension
13 from shutil import copyfile, rmtree
14
15 import subprocess
16
17 dir_path = os.path.dirname(os.path.realpath(__file__))
18
19 __version__ = '0.1.0a0'
20
21 CXX11_ABI = False
22
23 if "--use-cxx11-abi" in sys.argv:
24 sys.argv.remove("--use-cxx11-abi")
25 CXX11_ABI = True
26
27 def which(program):
28 import os
29 def is_exe(fpath):
30 return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
31
32 fpath, fname = os.path.split(program)
33 if fpath:
34 if is_exe(program):
35 return program
36 else:
37 for path in os.environ["PATH"].split(os.pathsep):
38 exe_file = os.path.join(path, program)
39 if is_exe(exe_file):
40 return exe_file
41
42 return None
43
44 BAZEL_EXE = which("bazel")
45
46 def build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):
47 cmd = [BAZEL_EXE, "build"]
48 cmd.append("//cpp/api/lib:libtrtorch.so")
49 if develop:
50 cmd.append("--compilation_mode=dbg")
51 else:
52 cmd.append("--compilation_mode=opt")
53 if use_dist_dir:
54 cmd.append("--distdir=third_party/dist_dir/x86_64-linux-gnu")
55 if not cxx11_abi:
56 cmd.append("--config=python")
57 else:
58 print("using CXX11 ABI build")
59
60 print("building libtrtorch")
61 status_code = subprocess.run(cmd).returncode
62
63 if status_code != 0:
64 sys.exit(status_code)
65
66
67 def gen_version_file():
68 if not os.path.exists(dir_path + '/trtorch/_version.py'):
69 os.mknod(dir_path + '/trtorch/_version.py')
70
71 with open(dir_path + '/trtorch/_version.py', 'w') as f:
72 print("creating version file")
73 f.write("__version__ = \"" + __version__ + '\"')
74
75 def copy_libtrtorch(multilinux=False):
76 if not os.path.exists(dir_path + '/trtorch/lib'):
77 os.makedirs(dir_path + '/trtorch/lib')
78
79 print("copying library into module")
80 if multilinux:
81 copyfile(dir_path + "/build/libtrtorch_build/libtrtorch.so", dir_path + '/trtorch/lib/libtrtorch.so')
82 else:
83 copyfile(dir_path + "/../bazel-bin/cpp/api/lib/libtrtorch.so", dir_path + '/trtorch/lib/libtrtorch.so')
84
85 class DevelopCommand(develop):
86 description = "Builds the package and symlinks it into the PYTHONPATH"
87
88 def initialize_options(self):
89 develop.initialize_options(self)
90
91 def finalize_options(self):
92 develop.finalize_options(self)
93
94 def run(self):
95 global CXX11_ABI
96 build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)
97 gen_version_file()
98 copy_libtrtorch()
99 develop.run(self)
100
101
102 class InstallCommand(install):
103 description = "Builds the package"
104
105 def initialize_options(self):
106 install.initialize_options(self)
107
108 def finalize_options(self):
109 install.finalize_options(self)
110
111 def run(self):
112 global CXX11_ABI
113 build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
114 gen_version_file()
115 copy_libtrtorch()
116 install.run(self)
117
118 class BdistCommand(bdist_wheel):
119 description = "Builds the package"
120
121 def initialize_options(self):
122 bdist_wheel.initialize_options(self)
123
124 def finalize_options(self):
125 bdist_wheel.finalize_options(self)
126
127 def run(self):
128 global CXX11_ABI
129 build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)
130 gen_version_file()
131 copy_libtrtorch()
132 bdist_wheel.run(self)
133
134 class CleanCommand(Command):
135 """Custom clean command to tidy up the project root."""
136 PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']
137 description = "Command to tidy up the project root"
138 user_options = []
139
140 def initialize_options(self):
141 pass
142
143 def finalize_options(self):
144 pass
145
146 def run(self):
147 for path_spec in self.PY_CLEAN_FILES:
148 # Make paths absolute and relative to this path
149 abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))
150 for path in [str(p) for p in abs_paths]:
151 if not path.startswith(dir_path):
152 # Die if path in CLEAN_FILES is absolute + outside this directory
153 raise ValueError("%s is not a path inside %s" % (path, dir_path))
154 print('Removing %s' % os.path.relpath(path))
155 rmtree(path)
156
157 ext_modules = [
158 cpp_extension.CUDAExtension('trtorch._C',
159 [
160 'trtorch/csrc/trtorch_py.cpp',
161 'trtorch/csrc/tensorrt_backend.cpp',
162 'trtorch/csrc/tensorrt_classes.cpp',
163 'trtorch/csrc/register_tensorrt_classes.cpp',
164 ],
165 library_dirs=[
166 (dir_path + '/trtorch/lib/'),
167 "/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu"
168 ],
169 libraries=[
170 "trtorch"
171 ],
172 include_dirs=[
173 dir_path + "trtorch/csrc",
174 dir_path + "/../",
175 dir_path + "/../bazel-TRTorch/external/tensorrt/include",
176 ],
177 extra_compile_args=[
178 "-Wno-deprecated",
179 "-Wno-deprecated-declarations",
180 ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
181 extra_link_args=[
182 "-Wno-deprecated",
183 "-Wno-deprecated-declarations",
184 "-Wl,--no-as-needed",
185 "-ltrtorch",
186 "-Wl,-rpath,$ORIGIN/lib",
187 "-lpthread",
188 "-ldl",
189 "-lutil",
190 "-lrt",
191 "-lm",
192 "-Xlinker",
193 "-export-dynamic"
194 ] + (["-D_GLIBCXX_USE_CXX11_ABI=1"] if CXX11_ABI else ["-D_GLIBCXX_USE_CXX11_ABI=0"]),
195 undef_macros=[ "NDEBUG" ]
196 )
197 ]
198
199 with open("README.md", "r") as fh:
200 long_description = fh.read()
201
202 setup(
203 name='trtorch',
204 version=__version__,
205 author='NVIDIA',
206 author_email='[email protected]',
207 url='https://nvidia.github.io/TRTorch',
208 description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',
209 long_description_content_type='text/markdown',
210 long_description=long_description,
211 ext_modules=ext_modules,
212 install_requires=[
213 'torch==1.6.0',
214 ],
215 setup_requires=[],
216 cmdclass={
217 'install': InstallCommand,
218 'clean': CleanCommand,
219 'develop': DevelopCommand,
220 'build_ext': cpp_extension.BuildExtension,
221 'bdist_wheel': BdistCommand,
222 },
223 zip_safe=False,
224 license="BSD",
225 packages=find_packages(),
226 classifiers=[
227 "Development Status :: 4 - Beta",
228 "Environment :: GPU :: NVIDIA CUDA",
229 "License :: OSI Approved :: BSD License",
230 "Intended Audience :: Developers",
231 "Intended Audience :: Science/Research",
232 "Operating System :: POSIX :: Linux",
233 "Programming Language :: C++",
234 "Programming Language :: Python",
235 "Programming Language :: Python :: Implementation :: CPython",
236 "Topic :: Scientific/Engineering",
237 "Topic :: Scientific/Engineering :: Artificial Intelligence",
238 "Topic :: Software Development",
239 "Topic :: Software Development :: Libraries"
240 ],
241 python_requires='>=3.6',
242 include_package_data=True,
243 package_data={
244 'trtorch': ['lib/*.so'],
245 },
246 exclude_package_data={
247 '': ['*.cpp', '*.h'],
248 'trtorch': ['csrc/*.cpp'],
249 }
250 )
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/setup.py b/py/setup.py
--- a/py/setup.py
+++ b/py/setup.py
@@ -190,7 +190,7 @@
)
]
-with open("README.md", "r") as fh:
+with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
| {"golden_diff": "diff --git a/py/setup.py b/py/setup.py\n--- a/py/setup.py\n+++ b/py/setup.py\n@@ -190,7 +190,7 @@\n )\n ]\n \n-with open(\"README.md\", \"r\") as fh:\n+with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n \n setup(\n", "issue": "\ud83d\udc1b [Bug] UnicodeDecodeError running setup.py\n## Bug Description\r\n\r\nTrying to run \"python setup.py install\" fails with a unicode error when reading README.md.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. docker run --gpus=all -it nvcr.io/nvidia/tensorrt:20.03-py3 /bin/bash\r\n2. (cd /usr/bin && wget -O bazel https://github.com/bazelbuild/bazelisk/releases/download/v1.7.3/bazelisk-linux-amd64 && chmod +x bazel)\r\n3. git clone https://github.com/NVIDIA/TRTorch.git\r\n4. cd TRTorch/py\r\n5. pip install -r requirements.txt\r\n6. python setup.py install\r\n\r\nThe error follows:\r\n> root@320583666d0c:/workspace/TRTorch/py# python setup.py install \r\n> Traceback (most recent call last):\r\n> File \"setup.py\", line 194, in <module>\r\n> long_description = fh.read()\r\n> File \"/usr/lib/python3.6/encodings/ascii.py\", line 26, in decode\r\n> return codecs.ascii_decode(input, self.errors)[0]\r\n> UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 7349: ordinal not in range(128)\r\n\r\n## Expected behavior\r\n\r\nNo unicode error\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.0): 1.6.0\r\n - CPU Architecture: x86\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, `libtorch`, source): pip\r\n - Build command you used (if compiling from source): python setup.py install\r\n - Are you using local sources or building from archives: local sources (git clone)\r\n - Python version: 3.6.9\r\n - CUDA version: 10.2.89\r\n - GPU models and configuration: gtx 970\r\n\r\n## Additional context\r\n\r\nThe following appears to resolve the issue:\r\n\r\n```\r\ndiff --git a/py/setup.py b/py/setup.py\r\nindex 53f85da..8344c0a 100644\r\n--- a/py/setup.py\r\n+++ b/py/setup.py\r\n@@ -190,7 +190,7 @@ ext_modules = [\r\n )\r\n ]\r\n \r\n-with open(\"README.md\", \"r\") as fh:\r\n+with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\r\n long_description = fh.read()\r\n \r\n setup(\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport glob\nimport setuptools\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom distutils.cmd import Command\nfrom wheel.bdist_wheel import bdist_wheel\n\nfrom torch.utils import cpp_extension\nfrom shutil import copyfile, rmtree\n\nimport subprocess\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__version__ = '0.1.0a0'\n\nCXX11_ABI = False\n\nif \"--use-cxx11-abi\" in sys.argv:\n sys.argv.remove(\"--use-cxx11-abi\")\n CXX11_ABI = True\n\ndef which(program):\n import os\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\nBAZEL_EXE = which(\"bazel\")\n\ndef build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):\n cmd = [BAZEL_EXE, \"build\"]\n cmd.append(\"//cpp/api/lib:libtrtorch.so\")\n if develop:\n 
cmd.append(\"--compilation_mode=dbg\")\n else:\n cmd.append(\"--compilation_mode=opt\")\n if use_dist_dir:\n cmd.append(\"--distdir=third_party/dist_dir/x86_64-linux-gnu\")\n if not cxx11_abi:\n cmd.append(\"--config=python\")\n else:\n print(\"using CXX11 ABI build\")\n\n print(\"building libtrtorch\")\n status_code = subprocess.run(cmd).returncode\n\n if status_code != 0:\n sys.exit(status_code)\n\n\ndef gen_version_file():\n if not os.path.exists(dir_path + '/trtorch/_version.py'):\n os.mknod(dir_path + '/trtorch/_version.py')\n\n with open(dir_path + '/trtorch/_version.py', 'w') as f:\n print(\"creating version file\")\n f.write(\"__version__ = \\\"\" + __version__ + '\\\"')\n\ndef copy_libtrtorch(multilinux=False):\n if not os.path.exists(dir_path + '/trtorch/lib'):\n os.makedirs(dir_path + '/trtorch/lib')\n\n print(\"copying library into module\")\n if multilinux:\n copyfile(dir_path + \"/build/libtrtorch_build/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n else:\n copyfile(dir_path + \"/../bazel-bin/cpp/api/lib/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n\nclass DevelopCommand(develop):\n description = \"Builds the package and symlinks it into the PYTHONPATH\"\n\n def initialize_options(self):\n develop.initialize_options(self)\n\n def finalize_options(self):\n develop.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n develop.run(self)\n\n\nclass InstallCommand(install):\n description = \"Builds the package\"\n\n def initialize_options(self):\n install.initialize_options(self)\n\n def finalize_options(self):\n install.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n install.run(self)\n\nclass BdistCommand(bdist_wheel):\n description = \"Builds the package\"\n\n def initialize_options(self):\n bdist_wheel.initialize_options(self)\n\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n bdist_wheel.run(self)\n\nclass CleanCommand(Command):\n \"\"\"Custom clean command to tidy up the project root.\"\"\"\n PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']\n description = \"Command to tidy up the project root\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for path_spec in self.PY_CLEAN_FILES:\n # Make paths absolute and relative to this path\n abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))\n for path in [str(p) for p in abs_paths]:\n if not path.startswith(dir_path):\n # Die if path in CLEAN_FILES is absolute + outside this directory\n raise ValueError(\"%s is not a path inside %s\" % (path, dir_path))\n print('Removing %s' % os.path.relpath(path))\n rmtree(path)\n\next_modules = [\n cpp_extension.CUDAExtension('trtorch._C',\n [\n 'trtorch/csrc/trtorch_py.cpp',\n 'trtorch/csrc/tensorrt_backend.cpp',\n 'trtorch/csrc/tensorrt_classes.cpp',\n 'trtorch/csrc/register_tensorrt_classes.cpp',\n ],\n library_dirs=[\n (dir_path + '/trtorch/lib/'),\n \"/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu\"\n ],\n libraries=[\n \"trtorch\"\n ],\n include_dirs=[\n dir_path + \"trtorch/csrc\",\n 
dir_path + \"/../\",\n dir_path + \"/../bazel-TRTorch/external/tensorrt/include\",\n ],\n extra_compile_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n extra_link_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n \"-Wl,--no-as-needed\",\n \"-ltrtorch\",\n \"-Wl,-rpath,$ORIGIN/lib\",\n \"-lpthread\",\n \"-ldl\",\n \"-lutil\",\n \"-lrt\",\n \"-lm\",\n \"-Xlinker\",\n \"-export-dynamic\"\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n undef_macros=[ \"NDEBUG\" ]\n )\n]\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='trtorch',\n version=__version__,\n author='NVIDIA',\n author_email='[email protected]',\n url='https://nvidia.github.io/TRTorch',\n description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',\n long_description_content_type='text/markdown',\n long_description=long_description,\n ext_modules=ext_modules,\n install_requires=[\n 'torch==1.6.0',\n ],\n setup_requires=[],\n cmdclass={\n 'install': InstallCommand,\n 'clean': CleanCommand,\n 'develop': DevelopCommand,\n 'build_ext': cpp_extension.BuildExtension,\n 'bdist_wheel': BdistCommand,\n },\n zip_safe=False,\n license=\"BSD\",\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: GPU :: NVIDIA CUDA\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\"\n ],\n python_requires='>=3.6',\n include_package_data=True,\n package_data={\n 'trtorch': ['lib/*.so'],\n },\n exclude_package_data={\n '': ['*.cpp', '*.h'],\n 'trtorch': ['csrc/*.cpp'],\n }\n)\n", "path": "py/setup.py"}], "after_files": [{"content": "import os\nimport sys\nimport glob\nimport setuptools\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom distutils.cmd import Command\nfrom wheel.bdist_wheel import bdist_wheel\n\nfrom torch.utils import cpp_extension\nfrom shutil import copyfile, rmtree\n\nimport subprocess\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n__version__ = '0.1.0a0'\n\nCXX11_ABI = False\n\nif \"--use-cxx11-abi\" in sys.argv:\n sys.argv.remove(\"--use-cxx11-abi\")\n CXX11_ABI = True\n\ndef which(program):\n import os\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\nBAZEL_EXE = which(\"bazel\")\n\ndef build_libtrtorch_pre_cxx11_abi(develop=True, use_dist_dir=True, cxx11_abi=False):\n cmd = [BAZEL_EXE, \"build\"]\n cmd.append(\"//cpp/api/lib:libtrtorch.so\")\n if develop:\n cmd.append(\"--compilation_mode=dbg\")\n else:\n cmd.append(\"--compilation_mode=opt\")\n if use_dist_dir:\n 
cmd.append(\"--distdir=third_party/dist_dir/x86_64-linux-gnu\")\n if not cxx11_abi:\n cmd.append(\"--config=python\")\n else:\n print(\"using CXX11 ABI build\")\n\n print(\"building libtrtorch\")\n status_code = subprocess.run(cmd).returncode\n\n if status_code != 0:\n sys.exit(status_code)\n\n\ndef gen_version_file():\n if not os.path.exists(dir_path + '/trtorch/_version.py'):\n os.mknod(dir_path + '/trtorch/_version.py')\n\n with open(dir_path + '/trtorch/_version.py', 'w') as f:\n print(\"creating version file\")\n f.write(\"__version__ = \\\"\" + __version__ + '\\\"')\n\ndef copy_libtrtorch(multilinux=False):\n if not os.path.exists(dir_path + '/trtorch/lib'):\n os.makedirs(dir_path + '/trtorch/lib')\n\n print(\"copying library into module\")\n if multilinux:\n copyfile(dir_path + \"/build/libtrtorch_build/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n else:\n copyfile(dir_path + \"/../bazel-bin/cpp/api/lib/libtrtorch.so\", dir_path + '/trtorch/lib/libtrtorch.so')\n\nclass DevelopCommand(develop):\n description = \"Builds the package and symlinks it into the PYTHONPATH\"\n\n def initialize_options(self):\n develop.initialize_options(self)\n\n def finalize_options(self):\n develop.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=True, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n develop.run(self)\n\n\nclass InstallCommand(install):\n description = \"Builds the package\"\n\n def initialize_options(self):\n install.initialize_options(self)\n\n def finalize_options(self):\n install.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n install.run(self)\n\nclass BdistCommand(bdist_wheel):\n description = \"Builds the package\"\n\n def initialize_options(self):\n bdist_wheel.initialize_options(self)\n\n def finalize_options(self):\n bdist_wheel.finalize_options(self)\n\n def run(self):\n global CXX11_ABI\n build_libtrtorch_pre_cxx11_abi(develop=False, cxx11_abi=CXX11_ABI)\n gen_version_file()\n copy_libtrtorch()\n bdist_wheel.run(self)\n\nclass CleanCommand(Command):\n \"\"\"Custom clean command to tidy up the project root.\"\"\"\n PY_CLEAN_FILES = ['./build', './dist', './trtorch/__pycache__', './trtorch/lib', './*.pyc', './*.tgz', './*.egg-info']\n description = \"Command to tidy up the project root\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n for path_spec in self.PY_CLEAN_FILES:\n # Make paths absolute and relative to this path\n abs_paths = glob.glob(os.path.normpath(os.path.join(dir_path, path_spec)))\n for path in [str(p) for p in abs_paths]:\n if not path.startswith(dir_path):\n # Die if path in CLEAN_FILES is absolute + outside this directory\n raise ValueError(\"%s is not a path inside %s\" % (path, dir_path))\n print('Removing %s' % os.path.relpath(path))\n rmtree(path)\n\next_modules = [\n cpp_extension.CUDAExtension('trtorch._C',\n ['trtorch/csrc/trtorch_py.cpp'],\n library_dirs=[\n (dir_path + '/trtorch/lib/'),\n \"/opt/conda/lib/python3.6/config-3.6m-x86_64-linux-gnu\"\n ],\n libraries=[\n \"trtorch\"\n ],\n include_dirs=[\n dir_path + \"/../\",\n dir_path + \"/../bazel-TRTorch/external/tensorrt/include\",\n ],\n extra_compile_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n 
extra_link_args=[\n \"-Wno-deprecated\",\n \"-Wno-deprecated-declarations\",\n \"-Wl,--no-as-needed\",\n \"-ltrtorch\",\n \"-Wl,-rpath,$ORIGIN/lib\",\n \"-lpthread\",\n \"-ldl\",\n \"-lutil\",\n \"-lrt\",\n \"-lm\",\n \"-Xlinker\",\n \"-export-dynamic\"\n ] + ([\"-D_GLIBCXX_USE_CXX11_ABI=1\"] if CXX11_ABI else [\"-D_GLIBCXX_USE_CXX11_ABI=0\"]),\n undef_macros=[ \"NDEBUG\" ]\n )\n]\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name='trtorch',\n version=__version__,\n author='NVIDIA',\n author_email='[email protected]',\n url='https://nvidia.github.io/TRTorch',\n description='A compiler backend for PyTorch JIT targeting NVIDIA GPUs',\n long_description_content_type='text/markdown',\n long_description=long_description,\n ext_modules=ext_modules,\n install_requires=[\n 'torch==1.6.0',\n ],\n setup_requires=[],\n cmdclass={\n 'install': InstallCommand,\n 'clean': CleanCommand,\n 'develop': DevelopCommand,\n 'build_ext': cpp_extension.BuildExtension,\n 'bdist_wheel': BdistCommand,\n },\n zip_safe=False,\n license=\"BSD\",\n packages=find_packages(),\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: GPU :: NVIDIA CUDA\",\n \"License :: OSI Approved :: BSD License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\"\n ],\n python_requires='>=3.6',\n include_package_data=True,\n package_data={\n 'trtorch': ['lib/*.so'],\n },\n exclude_package_data={\n '': ['*.cpp', '*.h'],\n 'trtorch': ['csrc/*.cpp'],\n }\n)\n", "path": "py/setup.py"}]} | 3,390 | 83 |
gh_patches_debug_50801 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Container: Regional Cluster support for GKE clusters
I'm unable to get or create regional clusters using the container_v1 client APIs. The [documentation](https://googleapis.github.io/google-cloud-python/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.create_cluster) suggests that this is possible by using the `parent` parameter to describe the project/region in which to launch the cluster, but I get the following errors:
```bash
(env) david@ ~ $ which python
~/dev/env/bin/python
(env) david@ ~ $ pip freeze
...
google-api-core==1.6.0
google-auth==1.6.1
google-cloud==0.34.0
google-cloud-container==0.1.1
googleapis-common-protos==1.5.5
grpcio==1.16.1
...
(env) david@ ~ $ python --version
Python 2.7.10
(env) david@ ~ $ python ./get_cluster.py
Traceback (most recent call last):
File "./get_cluster.py", line 6, in <module>
cluster = client.get_cluster(project_id=credentials.project_id, parent='projects/<project_id>/locations/us-east1', cluster_id='ha-cluster-1')
TypeError: get_cluster() got an unexpected keyword argument 'parent'
```
Is it possible that the API documentation has been updated before the feature was merged, or is it more likely an environment issue on my end? Any insight into this would be appreciated.
I have also looked at using the [google-api-python-client](https://github.com/googleapis/google-api-python-client#google-api-client) to launch regional clusters, but I would prefer to use this library if the feature is supported. Are there any known workarounds for this?
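
For illustration, the call shape being attempted above, sketched against a newer client surface (assumptions: a `google-cloud-container` release newer than 0.1.1 whose `ClusterManagerClient.get_cluster` accepts a full resource `name`; the keyword and resource path below are not part of the 0.1.1 API):

```python
from google.cloud import container_v1

client = container_v1.ClusterManagerClient()

# Hypothetical name-based lookup for a regional cluster; `name=` is assumed to
# exist in a newer client release and is not available in 0.1.1.
cluster_name = "projects/<project_id>/locations/us-east1/clusters/ha-cluster-1"
cluster = client.get_cluster(name=cluster_name)
print(cluster.name, cluster.location)
```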
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `container/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = 'google-cloud-container'
24 description = 'Google Container Engine API client library'
25 version = '0.1.1'
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = 'Development Status :: 3 - Alpha'
31 dependencies = [
32 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev',
33 ]
34 extras = {
35 }
36
37
38 # Setup boilerplate below this line.
39
40 package_root = os.path.abspath(os.path.dirname(__file__))
41
42 readme_filename = os.path.join(package_root, 'README.rst')
43 with io.open(readme_filename, encoding='utf-8') as readme_file:
44 readme = readme_file.read()
45
46 # Only include packages under the 'google' namespace. Do not include tests,
47 # benchmarks, etc.
48 packages = [
49 package for package in setuptools.find_packages()
50 if package.startswith('google')]
51
52 # Determine which namespaces are needed.
53 namespaces = ['google']
54 if 'google.cloud' in packages:
55 namespaces.append('google.cloud')
56
57
58 setuptools.setup(
59 name=name,
60 version=version,
61 description=description,
62 long_description=readme,
63 author='Google LLC',
64 author_email='[email protected]',
65 license='Apache 2.0',
66 url='https://github.com/GoogleCloudPlatform/google-cloud-python',
67 classifiers=[
68 release_status,
69 'Intended Audience :: Developers',
70 'License :: OSI Approved :: Apache Software License',
71 'Programming Language :: Python',
72 'Programming Language :: Python :: 2',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3',
75 'Programming Language :: Python :: 3.4',
76 'Programming Language :: Python :: 3.5',
77 'Programming Language :: Python :: 3.6',
78 'Operating System :: OS Independent',
79 'Topic :: Internet',
80 ],
81 platforms='Posix; MacOS X; Windows',
82 packages=packages,
83 namespace_packages=namespaces,
84 install_requires=dependencies,
85 extras_require=extras,
86 include_package_data=True,
87 zip_safe=False,
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/container/setup.py b/container/setup.py
--- a/container/setup.py
+++ b/container/setup.py
@@ -22,7 +22,7 @@
name = 'google-cloud-container'
description = 'Google Container Engine API client library'
-version = '0.1.1'
+version = '0.2.0'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
| {"golden_diff": "diff --git a/container/setup.py b/container/setup.py\n--- a/container/setup.py\n+++ b/container/setup.py\n@@ -22,7 +22,7 @@\n \n name = 'google-cloud-container'\n description = 'Google Container Engine API client library'\n-version = '0.1.1'\n+version = '0.2.0'\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Container: Regional Cluster support for GKE clusters\n\r\nI'm unable to get or create regional clusters using the container_v1 client APIs. The [documentation](https://googleapis.github.io/google-cloud-python/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.create_cluster) suggests that this is possible by using the `parent` parameter to describe the project/region to launch the cluster but I get the following errors:\r\n\r\n```bash\r\n(env) david@ ~ $ which python\r\n~/dev/env/bin/python \r\n\r\n(env) david@ ~ $ pip freeze\r\n...\r\ngoogle-api-core==1.6.0\r\ngoogle-auth==1.6.1\r\ngoogle-cloud==0.34.0\r\ngoogle-cloud-container==0.1.1\r\ngoogleapis-common-protos==1.5.5\r\ngrpcio==1.16.1\r\n...\r\n\r\n(env) david@ ~ $ python --version\r\nPython 2.7.10\r\n\r\n(env) david@ ~ $ python ./get_cluster.py\r\nTraceback (most recent call last):\r\n File \"./get_cluster.py\", line 6, in <module>\r\n cluster = client.get_cluster(project_id=credentials.project_id, parent='projects/<project_id>/locations/us-east1', cluster_id='ha-cluster-1')\r\nTypeError: get_cluster() got an unexpected keyword argument 'parent'\r\n```\r\n \r\nIs it possible that the API documentation has been updated before the feature was merged or is it more likely an environment issue on my end? Any insight into this would be appreciated\r\n\r\nI have also looked at using the [google-api-python-client](https://github.com/googleapis/google-api-python-client#google-api-client) to launch regional clusters but I would prefer to use this library if the feature is supported. Are there any known workarounds for this?\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-container'\ndescription = 'Google Container Engine API client library'\nversion = '0.1.1'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 3 - Alpha'\ndependencies = [\n 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "container/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-container'\ndescription = 'Google Container Engine API client library'\nversion = '0.2.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 3 - Alpha'\ndependencies = [\n 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "container/setup.py"}]} | 1,424 | 99 |
gh_patches_debug_21296 | rasdani/github-patches | git_diff | open-mmlab__mmpose-258 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pylint: C0325
```bash
mmpose/core/evaluation/mesh_eval.py:27:0: C0325: Unnecessary parens after 'assert' keyword (superfluous-parens)
mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py:94:0: C0325: Unnecessary parens after 'assert' keyword (superfluous-parens)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py`
Content:
```
1 import os
2 from collections import OrderedDict
3
4 import json_tricks as json
5 import numpy as np
6
7 from mmpose.datasets.builder import DATASETS
8 from ....core.evaluation import compute_similarity_transform
9 from .mesh_base_dataset import MeshBaseDataset
10
11
12 @DATASETS.register_module()
13 class MeshH36MDataset(MeshBaseDataset):
14 """Human3.6M Dataset for 3D human mesh estimation. It inherits all function
15 from MeshBaseDataset and has its own evaluate fuction.
16
17 The dataset loads raw features and apply specified transforms
18 to return a dict containing the image tensors and other information.
19
20 Args:
21 ann_file (str): Path to the annotation file.
22 img_prefix (str): Path to a directory where images are held.
23 Default: None.
24 data_cfg (dict): config
25 pipeline (list[dict | callable]): A sequence of data transforms.
26 test_mode (bool): Store True when building test or
27 validation dataset. Default: False.
28 """
29
30 def evaluate(self, outputs, res_folder, metric='joint_error', logger=None):
31 """Evaluate 3D keypoint results."""
32 metrics = metric if isinstance(metric, list) else [metric]
33 allowed_metrics = ['joint_error']
34 for metric in metrics:
35 if metric not in allowed_metrics:
36 raise KeyError(f'metric {metric} is not supported')
37
38 res_file = os.path.join(res_folder, 'result_keypoints.json')
39 kpts = []
40 for preds, boxes, image_path in outputs:
41 kpts.append({
42 'keypoints': preds[0].tolist(),
43 'center': boxes[0][0:2].tolist(),
44 'scale': boxes[0][2:4].tolist(),
45 'area': float(boxes[0][4]),
46 'score': float(boxes[0][5]),
47 'image': image_path,
48 })
49
50 self._write_keypoint_results(kpts, res_file)
51 info_str = self._report_metric(res_file)
52 name_value = OrderedDict(info_str)
53 return name_value
54
55 def _write_keypoint_results(self, keypoints, res_file):
56 """Write results into a json file."""
57
58 with open(res_file, 'w') as f:
59 json.dump(keypoints, f, sort_keys=True, indent=4)
60
61 def _report_metric(self, res_file):
62 """Keypoint evaluation.
63
64 Report mean per joint position error (MPJPE) and mean per joint
65 position error after rigid alignment (MPJPE-PA)
66 """
67
68 with open(res_file, 'r') as fin:
69 preds = json.load(fin)
70 assert len(preds) == len(self.db)
71
72 joint_error = []
73 joint_error_pa = []
74
75 for pred, item in zip(preds, self.db):
76 error, error_pa = self.evaluate_kernel(pred['keypoints'][0],
77 item['joints_3d'],
78 item['joints_3d_visible'])
79 joint_error.append(error)
80 joint_error_pa.append(error_pa)
81
82 mpjpe = np.array(joint_error).mean()
83 mpjpe_pa = np.array(joint_error_pa).mean()
84
85 info_str = []
86 info_str.append(('MPJPE', mpjpe * 1000))
87 info_str.append(('MPJPE-PA', mpjpe_pa * 1000))
88 return info_str
89
90 def evaluate_kernel(self, pred_joints_3d, joints_3d, joints_3d_visible):
91 """Evaluate one example."""
92 # Only 14 lsp joints are used for evaluation
93 joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]
94 assert (joints_3d_visible[joint_mapper].min() > 0)
95
96 pred_joints_3d = np.array(pred_joints_3d)
97 pred_joints_3d = pred_joints_3d[joint_mapper, :]
98 pred_pelvis = (pred_joints_3d[[2]] + pred_joints_3d[[3]]) / 2
99 pred_joints_3d = pred_joints_3d - pred_pelvis
100
101 gt_joints_3d = joints_3d[joint_mapper, :]
102 gt_pelvis = (gt_joints_3d[[2]] + gt_joints_3d[[3]]) / 2
103 gt_joints_3d = gt_joints_3d - gt_pelvis
104
105 error = pred_joints_3d - gt_joints_3d
106 error = np.linalg.norm(error, ord=2, axis=-1).mean(axis=-1)
107
108 pred_joints_3d_aligned = compute_similarity_transform(
109 pred_joints_3d, gt_joints_3d)
110 error_pa = pred_joints_3d_aligned - gt_joints_3d
111 error_pa = np.linalg.norm(error_pa, ord=2, axis=-1).mean(axis=-1)
112
113 return error, error_pa
114
```
Path: `mmpose/core/evaluation/mesh_eval.py`
Content:
```
1 # ------------------------------------------------------------------------------
2 # Adapted from https://github.com/akanazawa/hmr
3 # Original licence: Copyright (c) 2018 akanazawa, under the MIT License.
4 # ------------------------------------------------------------------------------
5
6 import numpy as np
7
8
9 def compute_similarity_transform(source_points, target_points):
10 """Computes a similarity transform (sR, t) that takes a set of 3D points
11 source_points (N x 3) closest to a set of 3D points target_points, where R
12 is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the
13 transformed 3D points source_points_hat (N x 3). i.e. solves the orthogonal
14 Procrutes problem.
15
16 Notes:
17 Points number: N
18
19 Args:
20 source_points (np.ndarray([N, 3])): Source point set.
21 target_points (np.ndarray([N, 3])): Target point set.
22
23 Returns:
24 source_points_hat (np.ndarray([N, 3])): Transformed source point set.
25 """
26
27 assert (target_points.shape[0] == source_points.shape[0])
28 assert (target_points.shape[1] == 3 and source_points.shape[1] == 3)
29
30 source_points = source_points.T
31 target_points = target_points.T
32
33 # 1. Remove mean.
34 mu1 = source_points.mean(axis=1, keepdims=True)
35 mu2 = target_points.mean(axis=1, keepdims=True)
36 X1 = source_points - mu1
37 X2 = target_points - mu2
38
39 # 2. Compute variance of X1 used for scale.
40 var1 = np.sum(X1**2)
41
42 # 3. The outer product of X1 and X2.
43 K = X1.dot(X2.T)
44
45 # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are
46 # singular vectors of K.
47 U, _, Vh = np.linalg.svd(K)
48 V = Vh.T
49 # Construct Z that fixes the orientation of R to get det(R)=1.
50 Z = np.eye(U.shape[0])
51 Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))
52 # Construct R.
53 R = V.dot(Z.dot(U.T))
54
55 # 5. Recover scale.
56 scale = np.trace(R.dot(K)) / var1
57
58 # 6. Recover translation.
59 t = mu2 - scale * (R.dot(mu1))
60
61 # 7. Transform the source points:
62 source_points_hat = scale * R.dot(source_points) + t
63
64 source_points_hat = source_points_hat.T
65
66 return source_points_hat
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/core/evaluation/mesh_eval.py b/mmpose/core/evaluation/mesh_eval.py
--- a/mmpose/core/evaluation/mesh_eval.py
+++ b/mmpose/core/evaluation/mesh_eval.py
@@ -24,8 +24,8 @@
source_points_hat (np.ndarray([N, 3])): Transformed source point set.
"""
- assert (target_points.shape[0] == source_points.shape[0])
- assert (target_points.shape[1] == 3 and source_points.shape[1] == 3)
+ assert target_points.shape[0] == source_points.shape[0]
+ assert target_points.shape[1] == 3 and source_points.shape[1] == 3
 
source_points = source_points.T
target_points = target_points.T
diff --git a/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py b/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py
--- a/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py
+++ b/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py
@@ -91,7 +91,7 @@
"""Evaluate one example."""
# Only 14 lsp joints are used for evaluation
joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]
- assert (joints_3d_visible[joint_mapper].min() > 0)
+ assert joints_3d_visible[joint_mapper].min() > 0
 
pred_joints_3d = np.array(pred_joints_3d)
pred_joints_3d = pred_joints_3d[joint_mapper, :]
| {"golden_diff": "diff --git a/mmpose/core/evaluation/mesh_eval.py b/mmpose/core/evaluation/mesh_eval.py\n--- a/mmpose/core/evaluation/mesh_eval.py\n+++ b/mmpose/core/evaluation/mesh_eval.py\n@@ -24,8 +24,8 @@\n source_points_hat (np.ndarray([N, 3])): Transformed source point set.\n \"\"\"\n \n- assert (target_points.shape[0] == source_points.shape[0])\n- assert (target_points.shape[1] == 3 and source_points.shape[1] == 3)\n+ assert target_points.shape[0] == source_points.shape[0]\n+ assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n \n source_points = source_points.T\n target_points = target_points.T\ndiff --git a/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py b/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py\n--- a/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py\n+++ b/mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py\n@@ -91,7 +91,7 @@\n \"\"\"Evaluate one example.\"\"\"\n # Only 14 lsp joints are used for evaluation\n joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]\n- assert (joints_3d_visible[joint_mapper].min() > 0)\n+ assert joints_3d_visible[joint_mapper].min() > 0\n \n pred_joints_3d = np.array(pred_joints_3d)\n pred_joints_3d = pred_joints_3d[joint_mapper, :]\n", "issue": "Pylint: C0325\n```bash\r\nmmpose/core/evaluation/mesh_eval.py:27:0: C0325: Unnecessary parens after 'assert' keyword (superfluous-parens)\r\nmmpose/datasets/datasets/mesh/mesh_h36m_dataset.py:94:0: C0325: Unnecessary parens after 'assert' keyword (superfluous-parens)\r\n```\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom ....core.evaluation import compute_similarity_transform\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\[email protected]_module()\nclass MeshH36MDataset(MeshBaseDataset):\n \"\"\"Human3.6M Dataset for 3D human mesh estimation. It inherits all function\n from MeshBaseDataset and has its own evaluate fuction.\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. 
Default: False.\n \"\"\"\n\n def evaluate(self, outputs, res_folder, metric='joint_error', logger=None):\n \"\"\"Evaluate 3D keypoint results.\"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['joint_error']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n kpts = []\n for preds, boxes, image_path in outputs:\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image': image_path,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n return name_value\n\n def _write_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report mean per joint position error (MPJPE) and mean per joint\n position error after rigid alignment (MPJPE-PA)\n \"\"\"\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n joint_error = []\n joint_error_pa = []\n\n for pred, item in zip(preds, self.db):\n error, error_pa = self.evaluate_kernel(pred['keypoints'][0],\n item['joints_3d'],\n item['joints_3d_visible'])\n joint_error.append(error)\n joint_error_pa.append(error_pa)\n\n mpjpe = np.array(joint_error).mean()\n mpjpe_pa = np.array(joint_error_pa).mean()\n\n info_str = []\n info_str.append(('MPJPE', mpjpe * 1000))\n info_str.append(('MPJPE-PA', mpjpe_pa * 1000))\n return info_str\n\n def evaluate_kernel(self, pred_joints_3d, joints_3d, joints_3d_visible):\n \"\"\"Evaluate one example.\"\"\"\n # Only 14 lsp joints are used for evaluation\n joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]\n assert (joints_3d_visible[joint_mapper].min() > 0)\n\n pred_joints_3d = np.array(pred_joints_3d)\n pred_joints_3d = pred_joints_3d[joint_mapper, :]\n pred_pelvis = (pred_joints_3d[[2]] + pred_joints_3d[[3]]) / 2\n pred_joints_3d = pred_joints_3d - pred_pelvis\n\n gt_joints_3d = joints_3d[joint_mapper, :]\n gt_pelvis = (gt_joints_3d[[2]] + gt_joints_3d[[3]]) / 2\n gt_joints_3d = gt_joints_3d - gt_pelvis\n\n error = pred_joints_3d - gt_joints_3d\n error = np.linalg.norm(error, ord=2, axis=-1).mean(axis=-1)\n\n pred_joints_3d_aligned = compute_similarity_transform(\n pred_joints_3d, gt_joints_3d)\n error_pa = pred_joints_3d_aligned - gt_joints_3d\n error_pa = np.linalg.norm(error_pa, ord=2, axis=-1).mean(axis=-1)\n\n return error, error_pa\n", "path": "mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py"}, {"content": "# ------------------------------------------------------------------------------\n# Adapted from https://github.com/akanazawa/hmr\n# Original licence: Copyright (c) 2018 akanazawa, under the MIT License.\n# ------------------------------------------------------------------------------\n\nimport numpy as np\n\n\ndef compute_similarity_transform(source_points, target_points):\n \"\"\"Computes a similarity transform (sR, t) that takes a set of 3D points\n source_points (N x 3) closest to a set of 3D points target_points, where R\n is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the\n transformed 3D points source_points_hat (N x 3). i.e. 
solves the orthogonal\n Procrutes problem.\n\n Notes:\n Points number: N\n\n Args:\n source_points (np.ndarray([N, 3])): Source point set.\n target_points (np.ndarray([N, 3])): Target point set.\n\n Returns:\n source_points_hat (np.ndarray([N, 3])): Transformed source point set.\n \"\"\"\n\n assert (target_points.shape[0] == source_points.shape[0])\n assert (target_points.shape[1] == 3 and source_points.shape[1] == 3)\n\n source_points = source_points.T\n target_points = target_points.T\n\n # 1. Remove mean.\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n\n # 2. Compute variance of X1 used for scale.\n var1 = np.sum(X1**2)\n\n # 3. The outer product of X1 and X2.\n K = X1.dot(X2.T)\n\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are\n # singular vectors of K.\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n # Construct R.\n R = V.dot(Z.dot(U.T))\n\n # 5. Recover scale.\n scale = np.trace(R.dot(K)) / var1\n\n # 6. Recover translation.\n t = mu2 - scale * (R.dot(mu1))\n\n # 7. Transform the source points:\n source_points_hat = scale * R.dot(source_points) + t\n\n source_points_hat = source_points_hat.T\n\n return source_points_hat\n", "path": "mmpose/core/evaluation/mesh_eval.py"}], "after_files": [{"content": "import os\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom ....core.evaluation import compute_similarity_transform\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\[email protected]_module()\nclass MeshH36MDataset(MeshBaseDataset):\n \"\"\"Human3.6M Dataset for 3D human mesh estimation. It inherits all function\n from MeshBaseDataset and has its own evaluate fuction.\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. 
Default: False.\n \"\"\"\n\n def evaluate(self, outputs, res_folder, metric='joint_error', logger=None):\n \"\"\"Evaluate 3D keypoint results.\"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['joint_error']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n kpts = []\n for preds, boxes, image_path in outputs:\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image': image_path,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n return name_value\n\n def _write_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report mean per joint position error (MPJPE) and mean per joint\n position error after rigid alignment (MPJPE-PA)\n \"\"\"\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n joint_error = []\n joint_error_pa = []\n\n for pred, item in zip(preds, self.db):\n error, error_pa = self.evaluate_kernel(pred['keypoints'][0],\n item['joints_3d'],\n item['joints_3d_visible'])\n joint_error.append(error)\n joint_error_pa.append(error_pa)\n\n mpjpe = np.array(joint_error).mean()\n mpjpe_pa = np.array(joint_error_pa).mean()\n\n info_str = []\n info_str.append(('MPJPE', mpjpe * 1000))\n info_str.append(('MPJPE-PA', mpjpe_pa * 1000))\n return info_str\n\n def evaluate_kernel(self, pred_joints_3d, joints_3d, joints_3d_visible):\n \"\"\"Evaluate one example.\"\"\"\n # Only 14 lsp joints are used for evaluation\n joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]\n assert joints_3d_visible[joint_mapper].min() > 0\n\n pred_joints_3d = np.array(pred_joints_3d)\n pred_joints_3d = pred_joints_3d[joint_mapper, :]\n pred_pelvis = (pred_joints_3d[[2]] + pred_joints_3d[[3]]) / 2\n pred_joints_3d = pred_joints_3d - pred_pelvis\n\n gt_joints_3d = joints_3d[joint_mapper, :]\n gt_pelvis = (gt_joints_3d[[2]] + gt_joints_3d[[3]]) / 2\n gt_joints_3d = gt_joints_3d - gt_pelvis\n\n error = pred_joints_3d - gt_joints_3d\n error = np.linalg.norm(error, ord=2, axis=-1).mean(axis=-1)\n\n pred_joints_3d_aligned = compute_similarity_transform(\n pred_joints_3d, gt_joints_3d)\n error_pa = pred_joints_3d_aligned - gt_joints_3d\n error_pa = np.linalg.norm(error_pa, ord=2, axis=-1).mean(axis=-1)\n\n return error, error_pa\n", "path": "mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py"}, {"content": "# ------------------------------------------------------------------------------\n# Adapted from https://github.com/akanazawa/hmr\n# Original licence: Copyright (c) 2018 akanazawa, under the MIT License.\n# ------------------------------------------------------------------------------\n\nimport numpy as np\n\n\ndef compute_similarity_transform(source_points, target_points):\n \"\"\"Computes a similarity transform (sR, t) that takes a set of 3D points\n source_points (N x 3) closest to a set of 3D points target_points, where R\n is an 3x3 rotation matrix, t 3x1 translation, s scale. And return the\n transformed 3D points source_points_hat (N x 3). i.e. 
solves the orthogonal\n Procrutes problem.\n\n Notes:\n Points number: N\n\n Args:\n source_points (np.ndarray([N, 3])): Source point set.\n target_points (np.ndarray([N, 3])): Target point set.\n\n Returns:\n source_points_hat (np.ndarray([N, 3])): Transformed source point set.\n \"\"\"\n\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n\n source_points = source_points.T\n target_points = target_points.T\n\n # 1. Remove mean.\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n\n # 2. Compute variance of X1 used for scale.\n var1 = np.sum(X1**2)\n\n # 3. The outer product of X1 and X2.\n K = X1.dot(X2.T)\n\n # 4. Solution that Maximizes trace(R'K) is R=U*V', where U, V are\n # singular vectors of K.\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n # Construct Z that fixes the orientation of R to get det(R)=1.\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n # Construct R.\n R = V.dot(Z.dot(U.T))\n\n # 5. Recover scale.\n scale = np.trace(R.dot(K)) / var1\n\n # 6. Recover translation.\n t = mu2 - scale * (R.dot(mu1))\n\n # 7. Transform the source points:\n source_points_hat = scale * R.dot(source_points) + t\n\n source_points_hat = source_points_hat.T\n\n return source_points_hat\n", "path": "mmpose/core/evaluation/mesh_eval.py"}]} | 2,472 | 418 |
gh_patches_debug_1513 | rasdani/github-patches | git_diff | searx__searx-1093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Python 3.6 Autocomplete not work
Use searx with archlinux and python 3.6 [https://aur.archlinux.org/packages/searx-py3](https://aur.archlinux.org/packages/searx-py3)
Autocomplete not working, log :
```
rv = self.dispatch_request()
File "/usr/lib/python3.6/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python3.6/site-packages/searx/webapp.py", line 609, in autocompleter
raw_text_query.parse_query()
File "/usr/lib/python3.6/site-packages/searx/query.py", line 55, in parse_query
raw_query_parts = re.split(r'(\s+)', self.query)
File "/usr/lib/python3.6/re.py", line 212, in split
return _compile(pattern, flags).split(string, maxsplit)
TypeError: cannot use a string pattern on a bytes-like object
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/query.py`
Content:
```
1 #!/usr/bin/env python
2
3 '''
4 searx is free software: you can redistribute it and/or modify
5 it under the terms of the GNU Affero General Public License as published by
6 the Free Software Foundation, either version 3 of the License, or
7 (at your option) any later version.
8
9 searx is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU Affero General Public License for more details.
13
14 You should have received a copy of the GNU Affero General Public License
15 along with searx. If not, see < http://www.gnu.org/licenses/ >.
16
17 (C) 2014 by Thomas Pointhuber, <[email protected]>
18 '''
19
20 from searx.languages import language_codes
21 from searx.engines import (
22 categories, engines, engine_shortcuts
23 )
24 import re
25 import sys
26
27 if sys.version_info[0] == 3:
28 unicode = str
29
30 VALID_LANGUAGE_CODE = re.compile(r'^[a-z]{2,3}(-[a-zA-Z]{2})?$')
31
32
33 class RawTextQuery(object):
34 """parse raw text query (the value from the html input)"""
35
36 def __init__(self, query, disabled_engines):
37 self.query = query
38 self.disabled_engines = []
39
40 if disabled_engines:
41 self.disabled_engines = disabled_engines
42
43 self.query_parts = []
44 self.engines = []
45 self.languages = []
46 self.specific = False
47
48 # parse query, if tags are set, which
49 # change the serch engine or search-language
50 def parse_query(self):
51 self.query_parts = []
52
53 # split query, including whitespaces
54 raw_query_parts = re.split(r'(\s+)', self.query)
55
56 parse_next = True
57
58 for query_part in raw_query_parts:
59 if not parse_next:
60 self.query_parts[-1] += query_part
61 continue
62
63 parse_next = False
64
65 # part does only contain spaces, skip
66 if query_part.isspace()\
67 or query_part == '':
68 parse_next = True
69 self.query_parts.append(query_part)
70 continue
71
72 # this force a language
73 if query_part[0] == ':':
74 lang = query_part[1:].lower().replace('_', '-')
75
76 # user may set a valid, yet not selectable language
77 if VALID_LANGUAGE_CODE.match(lang):
78 self.languages.append(lang)
79 parse_next = True
80
81 # check if any language-code is equal with
82 # declared language-codes
83 for lc in language_codes:
84 lang_id, lang_name, country, english_name = map(unicode.lower, lc)
85
86 # if correct language-code is found
87 # set it as new search-language
88 if lang == lang_id\
89 or lang_id.startswith(lang)\
90 or lang == lang_name\
91 or lang == english_name\
92 or lang.replace('-', ' ') == country:
93 parse_next = True
94 self.languages.append(lang_id)
95 # to ensure best match (first match is not necessarily the best one)
96 if lang == lang_id:
97 break
98
99 # this force a engine or category
100 if query_part[0] == '!' or query_part[0] == '?':
101 prefix = query_part[1:].replace('-', ' ').replace('_', ' ')
102
103 # check if prefix is equal with engine shortcut
104 if prefix in engine_shortcuts:
105 parse_next = True
106 self.engines.append({'category': 'none',
107 'name': engine_shortcuts[prefix]})
108
109 # check if prefix is equal with engine name
110 elif prefix in engines:
111 parse_next = True
112 self.engines.append({'category': 'none',
113 'name': prefix})
114
115 # check if prefix is equal with categorie name
116 elif prefix in categories:
117 # using all engines for that search, which
118 # are declared under that categorie name
119 parse_next = True
120 self.engines.extend({'category': prefix,
121 'name': engine.name}
122 for engine in categories[prefix]
123 if (engine.name, prefix) not in self.disabled_engines)
124
125 if query_part[0] == '!':
126 self.specific = True
127
128 # append query part to query_part list
129 self.query_parts.append(query_part)
130
131 def changeSearchQuery(self, search_query):
132 if len(self.query_parts):
133 self.query_parts[-1] = search_query
134 else:
135 self.query_parts.append(search_query)
136
137 def getSearchQuery(self):
138 if len(self.query_parts):
139 return self.query_parts[-1]
140 else:
141 return ''
142
143 def getFullQuery(self):
144 # get full querry including whitespaces
145 return u''.join(self.query_parts)
146
147
148 class SearchQuery(object):
149 """container for all the search parameters (query, language, etc...)"""
150
151 def __init__(self, query, engines, categories, lang, safesearch, pageno, time_range):
152 self.query = query.encode('utf-8')
153 self.engines = engines
154 self.categories = categories
155 self.lang = lang
156 self.safesearch = safesearch
157 self.pageno = pageno
158 self.time_range = time_range
159
160 def __str__(self):
161 return str(self.query) + ";" + str(self.engines)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/query.py b/searx/query.py
--- a/searx/query.py
+++ b/searx/query.py
@@ -51,7 +51,7 @@
self.query_parts = []
 
# split query, including whitespaces
- raw_query_parts = re.split(r'(\s+)', self.query)
+ raw_query_parts = re.split(r'(\s+)' if isinstance(self.query, str) else b'(\s+)', self.query)
 
parse_next = True
| {"golden_diff": "diff --git a/searx/query.py b/searx/query.py\n--- a/searx/query.py\n+++ b/searx/query.py\n@@ -51,7 +51,7 @@\n self.query_parts = []\n \n # split query, including whitespaces\n- raw_query_parts = re.split(r'(\\s+)', self.query)\n+ raw_query_parts = re.split(r'(\\s+)' if isinstance(self.query, str) else b'(\\s+)', self.query)\n \n parse_next = True\n", "issue": "[bug] Python 3.6 Autocomplete not work\nUse searx with archlinux and python 3.6 [https://aur.archlinux.org/packages/searx-py3](https://aur.archlinux.org/packages/searx-py3)\r\nAutocomplete not working, log :\r\n```\r\n rv = self.dispatch_request()\r\n File \"/usr/lib/python3.6/site-packages/flask/app.py\", line 1598, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/lib/python3.6/site-packages/searx/webapp.py\", line 609, in autocompleter\r\n raw_text_query.parse_query()\r\n File \"/usr/lib/python3.6/site-packages/searx/query.py\", line 55, in parse_query\r\n raw_query_parts = re.split(r'(\\s+)', self.query)\r\n File \"/usr/lib/python3.6/re.py\", line 212, in split\r\n return _compile(pattern, flags).split(string, maxsplit)\r\n TypeError: cannot use a string pattern on a bytes-like object\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2014 by Thomas Pointhuber, <[email protected]>\n'''\n\nfrom searx.languages import language_codes\nfrom searx.engines import (\n categories, engines, engine_shortcuts\n)\nimport re\nimport sys\n\nif sys.version_info[0] == 3:\n unicode = str\n\nVALID_LANGUAGE_CODE = re.compile(r'^[a-z]{2,3}(-[a-zA-Z]{2})?$')\n\n\nclass RawTextQuery(object):\n \"\"\"parse raw text query (the value from the html input)\"\"\"\n\n def __init__(self, query, disabled_engines):\n self.query = query\n self.disabled_engines = []\n\n if disabled_engines:\n self.disabled_engines = disabled_engines\n\n self.query_parts = []\n self.engines = []\n self.languages = []\n self.specific = False\n\n # parse query, if tags are set, which\n # change the serch engine or search-language\n def parse_query(self):\n self.query_parts = []\n\n # split query, including whitespaces\n raw_query_parts = re.split(r'(\\s+)', self.query)\n\n parse_next = True\n\n for query_part in raw_query_parts:\n if not parse_next:\n self.query_parts[-1] += query_part\n continue\n\n parse_next = False\n\n # part does only contain spaces, skip\n if query_part.isspace()\\\n or query_part == '':\n parse_next = True\n self.query_parts.append(query_part)\n continue\n\n # this force a language\n if query_part[0] == ':':\n lang = query_part[1:].lower().replace('_', '-')\n\n # user may set a valid, yet not selectable language\n if VALID_LANGUAGE_CODE.match(lang):\n self.languages.append(lang)\n parse_next = True\n\n # check if any language-code is equal with\n # declared language-codes\n for lc in language_codes:\n lang_id, lang_name, country, english_name = map(unicode.lower, lc)\n\n # if correct language-code is found\n # set it as new search-language\n if lang == lang_id\\\n or lang_id.startswith(lang)\\\n or lang == lang_name\\\n or lang == english_name\\\n or lang.replace('-', ' ') == country:\n parse_next = True\n self.languages.append(lang_id)\n # to ensure best match (first match is not necessarily the best one)\n if lang == lang_id:\n break\n\n # this force a engine or category\n if query_part[0] == '!' 
or query_part[0] == '?':\n prefix = query_part[1:].replace('-', ' ').replace('_', ' ')\n\n # check if prefix is equal with engine shortcut\n if prefix in engine_shortcuts:\n parse_next = True\n self.engines.append({'category': 'none',\n 'name': engine_shortcuts[prefix]})\n\n # check if prefix is equal with engine name\n elif prefix in engines:\n parse_next = True\n self.engines.append({'category': 'none',\n 'name': prefix})\n\n # check if prefix is equal with categorie name\n elif prefix in categories:\n # using all engines for that search, which\n # are declared under that categorie name\n parse_next = True\n self.engines.extend({'category': prefix,\n 'name': engine.name}\n for engine in categories[prefix]\n if (engine.name, prefix) not in self.disabled_engines)\n\n if query_part[0] == '!':\n self.specific = True\n\n # append query part to query_part list\n self.query_parts.append(query_part)\n\n def changeSearchQuery(self, search_query):\n if len(self.query_parts):\n self.query_parts[-1] = search_query\n else:\n self.query_parts.append(search_query)\n\n def getSearchQuery(self):\n if len(self.query_parts):\n return self.query_parts[-1]\n else:\n return ''\n\n def getFullQuery(self):\n # get full querry including whitespaces\n return u''.join(self.query_parts)\n\n\nclass SearchQuery(object):\n \"\"\"container for all the search parameters (query, language, etc...)\"\"\"\n\n def __init__(self, query, engines, categories, lang, safesearch, pageno, time_range):\n self.query = query.encode('utf-8')\n self.engines = engines\n self.categories = categories\n self.lang = lang\n self.safesearch = safesearch\n self.pageno = pageno\n self.time_range = time_range\n\n def __str__(self):\n return str(self.query) + \";\" + str(self.engines)\n", "path": "searx/query.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2014 by Thomas Pointhuber, <[email protected]>\n'''\n\nfrom searx.languages import language_codes\nfrom searx.engines import (\n categories, engines, engine_shortcuts\n)\nimport re\nimport sys\n\nif sys.version_info[0] == 3:\n unicode = str\n\nVALID_LANGUAGE_CODE = re.compile(r'^[a-z]{2,3}(-[a-zA-Z]{2})?$')\n\n\nclass RawTextQuery(object):\n \"\"\"parse raw text query (the value from the html input)\"\"\"\n\n def __init__(self, query, disabled_engines):\n self.query = query\n self.disabled_engines = []\n\n if disabled_engines:\n self.disabled_engines = disabled_engines\n\n self.query_parts = []\n self.engines = []\n self.languages = []\n self.specific = False\n\n # parse query, if tags are set, which\n # change the serch engine or search-language\n def parse_query(self):\n self.query_parts = []\n\n # split query, including whitespaces\n raw_query_parts = re.split(r'(\\s+)' if isinstance(self.query, str) else b'(\\s+)', self.query)\n\n parse_next = True\n\n for query_part in raw_query_parts:\n if not parse_next:\n self.query_parts[-1] += query_part\n continue\n\n parse_next = False\n\n # part does only contain spaces, skip\n if query_part.isspace()\\\n or query_part == '':\n parse_next = True\n self.query_parts.append(query_part)\n continue\n\n # this force a language\n if query_part[0] == ':':\n lang = query_part[1:].lower().replace('_', '-')\n\n # user may set a valid, yet not selectable language\n if VALID_LANGUAGE_CODE.match(lang):\n self.languages.append(lang)\n parse_next = True\n\n # check if any language-code is equal with\n # declared language-codes\n for lc in language_codes:\n lang_id, lang_name, country, english_name = map(unicode.lower, lc)\n\n # if correct language-code is found\n # set it as new search-language\n if lang == lang_id\\\n or lang_id.startswith(lang)\\\n or lang == lang_name\\\n or lang == english_name\\\n or lang.replace('-', ' ') == country:\n parse_next = True\n self.languages.append(lang_id)\n # to ensure best match (first match is not necessarily the best one)\n if lang == lang_id:\n break\n\n # this force a engine or category\n if query_part[0] == '!' 
or query_part[0] == '?':\n prefix = query_part[1:].replace('-', ' ').replace('_', ' ')\n\n # check if prefix is equal with engine shortcut\n if prefix in engine_shortcuts:\n parse_next = True\n self.engines.append({'category': 'none',\n 'name': engine_shortcuts[prefix]})\n\n # check if prefix is equal with engine name\n elif prefix in engines:\n parse_next = True\n self.engines.append({'category': 'none',\n 'name': prefix})\n\n # check if prefix is equal with categorie name\n elif prefix in categories:\n # using all engines for that search, which\n # are declared under that categorie name\n parse_next = True\n self.engines.extend({'category': prefix,\n 'name': engine.name}\n for engine in categories[prefix]\n if (engine.name, prefix) not in self.disabled_engines)\n\n if query_part[0] == '!':\n self.specific = True\n\n # append query part to query_part list\n self.query_parts.append(query_part)\n\n def changeSearchQuery(self, search_query):\n if len(self.query_parts):\n self.query_parts[-1] = search_query\n else:\n self.query_parts.append(search_query)\n\n def getSearchQuery(self):\n if len(self.query_parts):\n return self.query_parts[-1]\n else:\n return ''\n\n def getFullQuery(self):\n # get full querry including whitespaces\n return u''.join(self.query_parts)\n\n\nclass SearchQuery(object):\n \"\"\"container for all the search parameters (query, language, etc...)\"\"\"\n\n def __init__(self, query, engines, categories, lang, safesearch, pageno, time_range):\n self.query = query.encode('utf-8')\n self.engines = engines\n self.categories = categories\n self.lang = lang\n self.safesearch = safesearch\n self.pageno = pageno\n self.time_range = time_range\n\n def __str__(self):\n return str(self.query) + \";\" + str(self.engines)\n", "path": "searx/query.py"}]} | 2,057 | 115 |
gh_patches_debug_60828 | rasdani/github-patches | git_diff | microsoft__AzureTRE-524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Service bus message times out on deployment of workspace template
**Describe the bug**
When deploying a template that takes > 10 minutes, although deployment is successful the status is not updated.
**Steps to reproduce**
1. Register and deploy the `azureml_devtestlabs` workspace
2. Log on to the VMSS resource processor using bastion
3. View the docker logs, wait until deployment is complete, and see similar to:
`LinkDetach("ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.")`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `processor_function/vm_porter/runner.py`
Content:
```
1 import os
2 import sys
3 import json
4 import socket
5 import asyncio
6 import logging
7 from shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa
8 from resources import strings # pylint: disable=import-error # noqa
9 from contextlib import asynccontextmanager
10 from azure.servicebus import ServiceBusMessage
11 from azure.servicebus.aio import ServiceBusClient, AutoLockRenewer
12 from azure.identity.aio import DefaultAzureCredential
13
14 logger_adapter = initialize_logging(logging.INFO, socket.gethostname())
15 disable_unwanted_loggers()
16
17
18 @asynccontextmanager
19 async def default_credentials(msi_id):
20 """
21 Context manager which yields the default credentials.
22 """
23 credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()
24 yield credential
25 await credential.close()
26
27
28 async def receive_message(env_vars, service_bus_client):
29 """
30 This method is an async generator which receives messages from service bus
31 and yields those messages. If the yielded function return True the message is
32 marked complete.
33 """
34 async with service_bus_client:
35 q_name = env_vars["resource_request_queue"]
36 renewer = AutoLockRenewer()
37 receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)
38
39 async with receiver:
40 received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)
41
42 for msg in received_msgs:
43 result = True
44 message = ""
45
46 try:
47 message = json.loads(str(msg))
48 result = (yield message)
49 except (json.JSONDecodeError) as e:
50 logging.error(f"Received bad service bus resource request message: {e}")
51 if result:
52 logging.info(f"Resource request for {message} is complete")
53 else:
54 logging.error('Message processing failed!')
55 logger_adapter.info(f"Message with id = {message['id']} processed as {result} and marked complete.")
56 await receiver.complete_message(msg)
57
58
59 def azure_login_command(env_vars):
60 local_login = f"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}"
61 vmss_login = f"az login --identity -u {env_vars['vmss_msi_id']}"
62 command = vmss_login if env_vars['vmss_msi_id'] else local_login
63 return command
64
65
66 def build_porter_command(msg_body, env_vars):
67 porter_parameters = ""
68 for parameter in msg_body['parameters']:
69 porter_parameters = porter_parameters + f" --param {parameter}={msg_body['parameters'][parameter]}"
70
71 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id']
72
73 porter_parameters = porter_parameters + f" --param tfstate_container_name={env_vars['tfstate_container_name']}"
74 porter_parameters = porter_parameters + f" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}"
75 porter_parameters = porter_parameters + f" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}"
76 porter_parameters = porter_parameters + f" --param arm_use_msi={env_vars['arm_use_msi']}"
77
78 command_line = [f"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter "
79 f"{msg_body['action']} {installation_id} "
80 f" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}"
81 f" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access"
82 f" && porter show {installation_id}"]
83 return command_line
84
85
86 def porter_envs(env_var):
87 porter_env_vars = {}
88 porter_env_vars["HOME"] = os.environ['HOME']
89 porter_env_vars["PATH"] = os.environ['PATH']
90 porter_env_vars["ARM_CLIENT_ID"] = env_var["arm_client_id"]
91 porter_env_vars["ARM_CLIENT_SECRET"] = env_var["arm_client_secret"]
92 porter_env_vars["ARM_SUBSCRIPTION_ID"] = env_var["arm_subscription_id"]
93 porter_env_vars["ARM_TENANT_ID"] = env_var["arm_tenant_id"]
94
95 return porter_env_vars
96
97
98 async def run_porter(command, env_vars):
99 proc = await asyncio.create_subprocess_shell(
100 ''.join(command),
101 stdout=asyncio.subprocess.PIPE,
102 stderr=asyncio.subprocess.PIPE,
103 env=porter_envs(env_vars))
104
105 stdout, stderr = await proc.communicate()
106 logging.info(f'[{command!r} exited with {proc.returncode}]')
107 result_stdout = None
108 result_stderr = None
109 if stdout:
110 result_stdout = stdout.decode()
111 logger_adapter.info('[stdout]')
112 for string in result_stdout.split('\n'):
113 if len(string) != 0:
114 logger_adapter.info(str(string))
115 if stderr:
116 result_stderr = stderr.decode()
117 logger_adapter.info('[stderr]')
118 for string in result_stderr.split('\n'):
119 if len(string) != 0:
120 logger_adapter.info(str(string))
121
122 return (proc.returncode, result_stdout, result_stderr)
123
124
125 def service_bus_message_generator(sb_message, status, deployment_message):
126 installation_id = sb_message['parameters']['tre_id'] + "-" + sb_message['parameters']['workspace_id']
127 resource_request_message = json.dumps({
128 "id": sb_message["id"],
129 "status": status,
130 "message": f"{installation_id}: {deployment_message}"
131 })
132 return resource_request_message
133
134
135 async def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):
136 installation_id = msg_body['parameters']['tre_id'] + "-" + msg_body['parameters']['workspace_id']
137 message_logger_adapter.info(f"{installation_id}: Deployment job configuration starting")
138 sb_sender = sb_client.get_queue_sender(queue_name=env_vars["deployment_status_queue"])
139 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, "Deployment job starting")
140 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
141
142 returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)
143 if returncode != 0:
144 error_message = "Error context message = " + " ".join(err.split('\n'))
145 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)
146 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
147 message_logger_adapter.info(f"{installation_id}: Deployment job configuration failed error = {error_message}")
148 return False
149 else:
150 success_message = "Workspace was deployed successfully..."
151 resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)
152 await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body["id"]))
153 message_logger_adapter.info(f"{installation_id}: {success_message}")
154 return True
155
156
157 async def runner(env_vars):
158 msi_id = env_vars["vmss_msi_id"]
159 service_bus_namespace = env_vars["service_bus_namespace"]
160 async with default_credentials(msi_id) as credential:
161 service_bus_client = ServiceBusClient(service_bus_namespace, credential)
162 logger_adapter.info("Starting message receiving loop...")
163 while True:
164 logger_adapter.info("Checking for new messages...")
165 receive_message_gen = receive_message(env_vars, service_bus_client)
166 try:
167 async for message in receive_message_gen:
168 logger_adapter.info(f"Message received for id={message['id']}")
169 message_logger_adapter = initialize_logging(logging.INFO, message['id'])
170 result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)
171 await receive_message_gen.asend(result)
172 except StopAsyncIteration: # the async generator when finished signals end with this exception.
173 pass
174 logger_adapter.info("All messages done sleeping...")
175 await asyncio.sleep(60)
176
177
178 def read_env_vars():
179 env_vars = {
180 # Needed for local dev
181 "app_id": os.environ.get("AZURE_CLIENT_ID", None),
182 "app_password": os.environ.get("AZURE_CLIENT_SECRET", None),
183
184 "registry_server": os.environ["REGISTRY_SERVER"],
185 "tfstate_container_name": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],
186 "tfstate_resource_group_name": os.environ['MGMT_RESOURCE_GROUP_NAME'],
187 "tfstate_storage_account_name": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],
188 "deployment_status_queue": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],
189 "resource_request_queue": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],
190 "service_bus_namespace": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],
191 "vmss_msi_id": os.environ.get('VMSS_MSI_ID', None),
192
193 # Needed for running porter
194 "arm_use_msi": os.environ["ARM_USE_MSI"],
195 "arm_subscription_id": os.environ['ARM_SUBSCRIPTION_ID'],
196 "arm_client_id": os.environ["ARM_CLIENT_ID"],
197 "arm_tenant_id": os.environ["ARM_TENANT_ID"]
198 }
199
200 env_vars["arm_client_secret"] = os.environ["ARM_CLIENT_SECRET"] if env_vars["arm_use_msi"] == "false" else ""
201
202 return env_vars
203
204
205 if __name__ == "__main__":
206 try:
207 env_vars = read_env_vars()
208 except KeyError as e:
209 logger_adapter.error(f"Environment variable {e} is not set correctly...Exiting")
210 sys.exit(1)
211 logger_adapter.info("Started processor")
212 asyncio.run(runner(env_vars))
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py
--- a/processor_function/vm_porter/runner.py
+++ b/processor_function/vm_porter/runner.py
@@ -33,7 +33,7 @@
"""
async with service_bus_client:
q_name = env_vars["resource_request_queue"]
- renewer = AutoLockRenewer()
+ renewer = AutoLockRenewer(max_lock_renewal_duration=1800)
receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)
 
async with receiver:
| {"golden_diff": "diff --git a/processor_function/vm_porter/runner.py b/processor_function/vm_porter/runner.py\n--- a/processor_function/vm_porter/runner.py\n+++ b/processor_function/vm_porter/runner.py\n@@ -33,7 +33,7 @@\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n- renewer = AutoLockRenewer()\n+ renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n \n async with receiver:\n", "issue": "[BUG] Service bus message times out on deployment of workspace template \n**Describe the bug**\r\nWhen deploying a template that takes > 10 minutes, although deployment is successful the status is not updated.\r\n\r\n**Steps to reproduce**\r\n\r\n1. Register and deploy the `azureml_devtestlabs` workspace\r\n2. Log on to the VMSS resource processor using bastion\r\n3. View the docker logs, wait until deployment is complete, and see similar to:\r\n\r\n`LinkDetach(\"ErrorCodes.LinkDetachForced: The link 'G3:5725658:sender-link-bd7b69d4-9ad4-4b9b-b9f6-2e311be400a3' is force detached. Code: publisher(link3135). Details: AmqpMessagePublisher.IdleTimerExpired: Idle timeout: 00:10:00.\")`\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. 
If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer()\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n if stderr:\n 
result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n \"tfstate_resource_group_name\": 
os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py"}], "after_files": [{"content": "import os\nimport sys\nimport json\nimport socket\nimport asyncio\nimport logging\nfrom shared.logging import disable_unwanted_loggers, initialize_logging # pylint: disable=import-error # noqa\nfrom resources import strings # pylint: disable=import-error # noqa\nfrom contextlib import asynccontextmanager\nfrom azure.servicebus import ServiceBusMessage\nfrom azure.servicebus.aio import ServiceBusClient, AutoLockRenewer\nfrom azure.identity.aio import DefaultAzureCredential\n\nlogger_adapter = initialize_logging(logging.INFO, socket.gethostname())\ndisable_unwanted_loggers()\n\n\n@asynccontextmanager\nasync def default_credentials(msi_id):\n \"\"\"\n Context manager which yields the default credentials.\n \"\"\"\n credential = DefaultAzureCredential(managed_identity_client_id=msi_id) if msi_id else DefaultAzureCredential()\n yield credential\n await credential.close()\n\n\nasync def receive_message(env_vars, service_bus_client):\n \"\"\"\n This method is an async generator which receives messages from service bus\n and yields those messages. 
If the yielded function return True the message is\n marked complete.\n \"\"\"\n async with service_bus_client:\n q_name = env_vars[\"resource_request_queue\"]\n renewer = AutoLockRenewer(max_lock_renewal_duration=1800)\n receiver = service_bus_client.get_queue_receiver(queue_name=q_name, auto_lock_renewer=renewer)\n\n async with receiver:\n received_msgs = await receiver.receive_messages(max_message_count=10, max_wait_time=5)\n\n for msg in received_msgs:\n result = True\n message = \"\"\n\n try:\n message = json.loads(str(msg))\n result = (yield message)\n except (json.JSONDecodeError) as e:\n logging.error(f\"Received bad service bus resource request message: {e}\")\n if result:\n logging.info(f\"Resource request for {message} is complete\")\n else:\n logging.error('Message processing failed!')\n logger_adapter.info(f\"Message with id = {message['id']} processed as {result} and marked complete.\")\n await receiver.complete_message(msg)\n\n\ndef azure_login_command(env_vars):\n local_login = f\"az login --service-principal --username {env_vars['arm_client_id']} --password {env_vars['arm_client_secret']} --tenant {env_vars['arm_tenant_id']}\"\n vmss_login = f\"az login --identity -u {env_vars['vmss_msi_id']}\"\n command = vmss_login if env_vars['vmss_msi_id'] else local_login\n return command\n\n\ndef build_porter_command(msg_body, env_vars):\n porter_parameters = \"\"\n for parameter in msg_body['parameters']:\n porter_parameters = porter_parameters + f\" --param {parameter}={msg_body['parameters'][parameter]}\"\n\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n\n porter_parameters = porter_parameters + f\" --param tfstate_container_name={env_vars['tfstate_container_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_resource_group_name={env_vars['tfstate_resource_group_name']}\"\n porter_parameters = porter_parameters + f\" --param tfstate_storage_account_name={env_vars['tfstate_storage_account_name']}\"\n porter_parameters = porter_parameters + f\" --param arm_use_msi={env_vars['arm_use_msi']}\"\n\n command_line = [f\"{azure_login_command(env_vars)} && az acr login --name {env_vars['registry_server'].replace('.azurecr.io','')} && porter \"\n f\"{msg_body['action']} {installation_id} \"\n f\" --reference {env_vars['registry_server']}/{msg_body['name']}:v{msg_body['version']}\"\n f\" {porter_parameters} --cred ./vm_porter/azure.json --allow-docker-host-access\"\n f\" && porter show {installation_id}\"]\n return command_line\n\n\ndef porter_envs(env_var):\n porter_env_vars = {}\n porter_env_vars[\"HOME\"] = os.environ['HOME']\n porter_env_vars[\"PATH\"] = os.environ['PATH']\n porter_env_vars[\"ARM_CLIENT_ID\"] = env_var[\"arm_client_id\"]\n porter_env_vars[\"ARM_CLIENT_SECRET\"] = env_var[\"arm_client_secret\"]\n porter_env_vars[\"ARM_SUBSCRIPTION_ID\"] = env_var[\"arm_subscription_id\"]\n porter_env_vars[\"ARM_TENANT_ID\"] = env_var[\"arm_tenant_id\"]\n\n return porter_env_vars\n\n\nasync def run_porter(command, env_vars):\n proc = await asyncio.create_subprocess_shell(\n ''.join(command),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=porter_envs(env_vars))\n\n stdout, stderr = await proc.communicate()\n logging.info(f'[{command!r} exited with {proc.returncode}]')\n result_stdout = None\n result_stderr = None\n if stdout:\n result_stdout = stdout.decode()\n logger_adapter.info('[stdout]')\n for string in result_stdout.split('\\n'):\n if len(string) != 0:\n 
logger_adapter.info(str(string))\n if stderr:\n result_stderr = stderr.decode()\n logger_adapter.info('[stderr]')\n for string in result_stderr.split('\\n'):\n if len(string) != 0:\n logger_adapter.info(str(string))\n\n return (proc.returncode, result_stdout, result_stderr)\n\n\ndef service_bus_message_generator(sb_message, status, deployment_message):\n installation_id = sb_message['parameters']['tre_id'] + \"-\" + sb_message['parameters']['workspace_id']\n resource_request_message = json.dumps({\n \"id\": sb_message[\"id\"],\n \"status\": status,\n \"message\": f\"{installation_id}: {deployment_message}\"\n })\n return resource_request_message\n\n\nasync def deploy_porter_bundle(msg_body, sb_client, env_vars, message_logger_adapter):\n installation_id = msg_body['parameters']['tre_id'] + \"-\" + msg_body['parameters']['workspace_id']\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration starting\")\n sb_sender = sb_client.get_queue_sender(queue_name=env_vars[\"deployment_status_queue\"])\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYING, \"Deployment job starting\")\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n\n returncode, _, err = await run_porter(build_porter_command(msg_body, env_vars), env_vars)\n if returncode != 0:\n error_message = \"Error context message = \" + \" \".join(err.split('\\n'))\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_FAILED, error_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: Deployment job configuration failed error = {error_message}\")\n return False\n else:\n success_message = \"Workspace was deployed successfully...\"\n resource_request_message = service_bus_message_generator(msg_body, strings.RESOURCE_STATUS_DEPLOYED, success_message)\n await sb_sender.send_messages(ServiceBusMessage(body=resource_request_message, correlation_id=msg_body[\"id\"]))\n message_logger_adapter.info(f\"{installation_id}: {success_message}\")\n return True\n\n\nasync def runner(env_vars):\n msi_id = env_vars[\"vmss_msi_id\"]\n service_bus_namespace = env_vars[\"service_bus_namespace\"]\n async with default_credentials(msi_id) as credential:\n service_bus_client = ServiceBusClient(service_bus_namespace, credential)\n logger_adapter.info(\"Starting message receiving loop...\")\n while True:\n logger_adapter.info(\"Checking for new messages...\")\n receive_message_gen = receive_message(env_vars, service_bus_client)\n try:\n async for message in receive_message_gen:\n logger_adapter.info(f\"Message received for id={message['id']}\")\n message_logger_adapter = initialize_logging(logging.INFO, message['id'])\n result = await deploy_porter_bundle(message, service_bus_client, env_vars, message_logger_adapter)\n await receive_message_gen.asend(result)\n except StopAsyncIteration: # the async generator when finished signals end with this exception.\n pass\n logger_adapter.info(\"All messages done sleeping...\")\n await asyncio.sleep(60)\n\n\ndef read_env_vars():\n env_vars = {\n # Needed for local dev\n \"app_id\": os.environ.get(\"AZURE_CLIENT_ID\", None),\n \"app_password\": os.environ.get(\"AZURE_CLIENT_SECRET\", None),\n\n \"registry_server\": os.environ[\"REGISTRY_SERVER\"],\n \"tfstate_container_name\": os.environ['TERRAFORM_STATE_CONTAINER_NAME'],\n 
\"tfstate_resource_group_name\": os.environ['MGMT_RESOURCE_GROUP_NAME'],\n \"tfstate_storage_account_name\": os.environ['MGMT_STORAGE_ACCOUNT_NAME'],\n \"deployment_status_queue\": os.environ['SERVICE_BUS_DEPLOYMENT_STATUS_UPDATE_QUEUE'],\n \"resource_request_queue\": os.environ['SERVICE_BUS_RESOURCE_REQUEST_QUEUE'],\n \"service_bus_namespace\": os.environ['SERVICE_BUS_FULLY_QUALIFIED_NAMESPACE'],\n \"vmss_msi_id\": os.environ.get('VMSS_MSI_ID', None),\n\n # Needed for running porter\n \"arm_use_msi\": os.environ[\"ARM_USE_MSI\"],\n \"arm_subscription_id\": os.environ['ARM_SUBSCRIPTION_ID'],\n \"arm_client_id\": os.environ[\"ARM_CLIENT_ID\"],\n \"arm_tenant_id\": os.environ[\"ARM_TENANT_ID\"]\n }\n\n env_vars[\"arm_client_secret\"] = os.environ[\"ARM_CLIENT_SECRET\"] if env_vars[\"arm_use_msi\"] == \"false\" else \"\"\n\n return env_vars\n\n\nif __name__ == \"__main__\":\n try:\n env_vars = read_env_vars()\n except KeyError as e:\n logger_adapter.error(f\"Environment variable {e} is not set correctly...Exiting\")\n sys.exit(1)\n logger_adapter.info(\"Started processor\")\n asyncio.run(runner(env_vars))\n", "path": "processor_function/vm_porter/runner.py"}]} | 3,107 | 146 |
gh_patches_debug_14173 | rasdani/github-patches | git_diff | napari__napari-466 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Canvas splitter no longer expanding when widening the viewer
## 🐛 Bug
Introduced by #443 or #453 I presume.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_qt/qt_viewer.py`
Content:
```
1 import os.path
2 from glob import glob
3 import numpy as np
4 import inspect
5 from pathlib import Path
6
7 from qtpy.QtCore import QCoreApplication, Qt, QSize
8 from qtpy.QtWidgets import (
9 QWidget,
10 QVBoxLayout,
11 QHBoxLayout,
12 QFrame,
13 QFileDialog,
14 QSplitter,
15 )
16 from qtpy.QtWidgets import QStackedWidget
17 from qtpy.QtGui import QCursor, QPixmap
18 from qtpy import API_NAME
19 from vispy.scene import SceneCanvas, PanZoomCamera, ArcballCamera
20 from vispy.app import use_app
21
22 from .qt_dims import QtDims
23 from .qt_layerlist import QtLayerList
24 from ..resources import resources_dir
25 from ..util.theme import template
26 from ..util.misc import is_multichannel
27 from ..util.keybindings import components_to_key_combo
28 from ..util.io import read
29
30 from .qt_controls import QtControls
31 from .qt_layer_buttons import QtLayersButtons
32 from .qt_console import QtConsole
33
34
35 # set vispy application to the appropriate qt backend
36 use_app(API_NAME)
37
38
39 class QtViewer(QSplitter):
40 with open(os.path.join(resources_dir, 'stylesheet.qss'), 'r') as f:
41 raw_stylesheet = f.read()
42
43 def __init__(self, viewer):
44 super().__init__()
45
46 QCoreApplication.setAttribute(
47 Qt.AA_UseStyleSheetPropagationInWidgetStyles, True
48 )
49
50 self.viewer = viewer
51 self.dims = QtDims(self.viewer.dims)
52 self.controls = QtControls(self.viewer)
53 self.layers = QtLayerList(self.viewer.layers)
54 self.buttons = QtLayersButtons(self.viewer)
55 self.console = QtConsole({'viewer': self.viewer})
56
57 if self.console.shell is not None:
58 self.console.style().unpolish(self.console)
59 self.console.style().polish(self.console)
60 self.console.hide()
61 self.buttons.consoleButton.clicked.connect(
62 lambda: self._toggle_console()
63 )
64 else:
65 self.buttons.consoleButton.setEnabled(False)
66
67 self.canvas = SceneCanvas(keys=None, vsync=True)
68 self.canvas.native.setMinimumSize(QSize(200, 200))
69
70 self.canvas.connect(self.on_mouse_move)
71 self.canvas.connect(self.on_mouse_press)
72 self.canvas.connect(self.on_mouse_release)
73 self.canvas.connect(self.on_key_press)
74 self.canvas.connect(self.on_key_release)
75 self.canvas.connect(self.on_draw)
76
77 self.view = self.canvas.central_widget.add_view()
78 self._update_camera()
79
80 center = QWidget()
81 center_layout = QVBoxLayout()
82 center_layout.setContentsMargins(15, 20, 15, 10)
83 center_layout.addWidget(self.canvas.native)
84 center_layout.addWidget(self.dims)
85 center.setLayout(center_layout)
86
87 right = QWidget()
88 right_layout = QVBoxLayout()
89 right_layout.addWidget(self.layers)
90 right_layout.addWidget(self.buttons)
91 right.setLayout(right_layout)
92
93 left = self.controls
94
95 top = QWidget()
96 top_layout = QHBoxLayout()
97 top_layout.addWidget(left)
98 top_layout.addWidget(center)
99 top_layout.addWidget(right)
100 top.setLayout(top_layout)
101
102 self.setOrientation(Qt.Vertical)
103 self.addWidget(top)
104
105 if self.console.shell is not None:
106 self.addWidget(self.console)
107
108 self._last_visited_dir = str(Path.home())
109
110 self._cursors = {
111 'disabled': QCursor(
112 QPixmap(':/icons/cursor/cursor_disabled.png').scaled(20, 20)
113 ),
114 'cross': Qt.CrossCursor,
115 'forbidden': Qt.ForbiddenCursor,
116 'pointing': Qt.PointingHandCursor,
117 'standard': QCursor(),
118 }
119
120 self._update_palette(viewer.palette)
121
122 self._key_release_generators = {}
123
124 self.viewer.events.interactive.connect(self._on_interactive)
125 self.viewer.events.cursor.connect(self._on_cursor)
126 self.viewer.events.reset_view.connect(self._on_reset_view)
127 self.viewer.events.palette.connect(
128 lambda event: self._update_palette(event.palette)
129 )
130 self.viewer.layers.events.reordered.connect(self._update_canvas)
131 self.viewer.dims.events.display.connect(
132 lambda event: self._update_camera()
133 )
134
135 self.setAcceptDrops(True)
136
137 def _update_camera(self):
138 if np.sum(self.viewer.dims.display) == 3:
139 # Set a 3D camera
140 self.view.camera = ArcballCamera(name="ArcballCamera")
141 # flip y-axis to have correct alignment
142 self.view.camera.flip = (0, 1, 0)
143 min_shape, max_shape = self.viewer._calc_bbox()
144 centroid = np.add(max_shape, min_shape) / 2
145 size = np.subtract(max_shape, min_shape)
146 # Scale the camera to the contents in the scene
147 if len(centroid) > 0:
148 centroid = centroid[-3:]
149 self.view.camera.center = centroid[::-1]
150 self.view.camera.scale_factor = 1.5 * np.mean(size[-3:])
151 elif np.sum(self.viewer.dims.display) == 2:
152 # Set 2D camera
153 self.view.camera = PanZoomCamera(aspect=1, name="PanZoomCamera")
154 # flip y-axis to have correct alignment
155 self.view.camera.flip = (0, 1, 0)
156 # Scale the camera to the contents in the scene
157 self.view.camera.set_range()
158 else:
159 raise ValueError(
160 "Invalid display flags set in dimensions {}".format(
161 self.viewer.dims.display
162 )
163 )
164
165 self.view.camera.viewbox_key_event = viewbox_key_event
166 # TO DO: Remove
167 self.viewer._view = self.view
168
169 def screenshot(self):
170 """Take currently displayed screen and convert to an image array.
171
172 Returns
173 -------
174 image : array
175 Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
176 upper-left corner of the rendered region.
177 """
178 img = self.canvas.native.grabFramebuffer()
179 b = img.constBits()
180 h, w, c = img.height(), img.width(), 4
181
182 # As vispy doesn't use qtpy we need to reconcile the differences
183 # between the `QImage` API for `PySide2` and `PyQt5` on how to convert
184 # a QImage to a numpy array.
185 if API_NAME == 'PySide2':
186 arr = np.array(b).reshape(h, w, c)
187 else:
188 b.setsize(h * w * c)
189 arr = np.frombuffer(b, np.uint8).reshape(h, w, c)
190
191 # Format of QImage is ARGB32_Premultiplied, but color channels are
192 # reversed.
193 arr = arr[:, :, [2, 1, 0, 3]]
194 return arr
195
196 def _open_images(self):
197 """Adds image files from the menubar."""
198 filenames, _ = QFileDialog.getOpenFileNames(
199 parent=self,
200 caption='Select image(s)...',
201 directory=self._last_visited_dir, # home dir by default
202 )
203 self._add_files(filenames)
204
205 def _add_files(self, filenames):
206 """Adds an image layer to the viewer.
207
208 Whether the image is multichannel is determined by
209 :func:`napari.util.misc.is_multichannel`.
210
211 If multiple images are selected, they are stacked along the 0th
212 axis.
213
214 Parameters
215 -------
216 filenames : list
217 List of filenames to be opened
218 """
219 if len(filenames) > 0:
220 image = read(filenames)
221 self.viewer.add_image(
222 image, multichannel=is_multichannel(image.shape)
223 )
224 self._last_visited_dir = os.path.dirname(filenames[0])
225
226 def _on_interactive(self, event):
227 self.view.interactive = self.viewer.interactive
228
229 def _on_cursor(self, event):
230 cursor = self.viewer.cursor
231 size = self.viewer.cursor_size
232 if cursor == 'square':
233 if size < 10 or size > 300:
234 q_cursor = self._cursors['cross']
235 else:
236 q_cursor = QCursor(
237 QPixmap(':/icons/cursor/cursor_square.png').scaledToHeight(
238 size
239 )
240 )
241 else:
242 q_cursor = self._cursors[cursor]
243 self.canvas.native.setCursor(q_cursor)
244
245 def _on_reset_view(self, event):
246 self.view.camera.rect = event.viewbox
247
248 def _update_canvas(self, event):
249         """Clears draw order and refreshes canvas. Useful for when layers are
250         reordered.
251 """
252 self.canvas._draw_order.clear()
253 self.canvas.update()
254
255 def _update_palette(self, palette):
256 # template and apply the primary stylesheet
257 themed_stylesheet = template(self.raw_stylesheet, **palette)
258 self.console.style_sheet = themed_stylesheet
259 self.console.syntax_style = palette['syntax_style']
260 self.setStyleSheet(themed_stylesheet)
261
262 def _toggle_console(self):
263 """Toggle console visible and not visible."""
264 self.console.setVisible(not self.console.isVisible())
265 self.buttons.consoleButton.setProperty(
266 'expanded', self.console.isVisible()
267 )
268 self.buttons.consoleButton.style().unpolish(self.buttons.consoleButton)
269 self.buttons.consoleButton.style().polish(self.buttons.consoleButton)
270
271 def on_mouse_move(self, event):
272 """Called whenever mouse moves over canvas.
273 """
274 layer = self.viewer.active_layer
275 if layer is not None:
276 layer.on_mouse_move(event)
277
278 def on_mouse_press(self, event):
279 """Called whenever mouse pressed in canvas.
280 """
281 layer = self.viewer.active_layer
282 if layer is not None:
283 layer.on_mouse_press(event)
284
285 def on_mouse_release(self, event):
286 """Called whenever mouse released in canvas.
287 """
288 layer = self.viewer.active_layer
289 if layer is not None:
290 layer.on_mouse_release(event)
291
292 def on_key_press(self, event):
293 """Called whenever key pressed in canvas.
294 """
295 if event.native.isAutoRepeat() or event.key is None:
296 return
297
298 comb = components_to_key_combo(event.key.name, event.modifiers)
299
300 layer = self.viewer.active_layer
301
302 if layer is not None and comb in layer.keymap:
303 parent = layer
304 elif comb in self.viewer.keymap:
305 parent = self.viewer
306 else:
307 return
308
309 func = parent.keymap[comb]
310 gen = func(parent)
311
312 if inspect.isgeneratorfunction(func):
313 try:
314 next(gen)
315 except StopIteration: # only one statement
316 pass
317 else:
318 self._key_release_generators[event.key] = gen
319
320 def on_key_release(self, event):
321 """Called whenever key released in canvas.
322 """
323 try:
324 next(self._key_release_generators[event.key])
325 except (KeyError, StopIteration):
326 pass
327
328 def on_draw(self, event):
329 """Called whenever drawn in canvas. Called for all layers, not just top
330 """
331 for layer in self.viewer.layers:
332 layer.on_draw(event)
333
334 def dragEnterEvent(self, event):
335 if event.mimeData().hasUrls():
336 event.accept()
337 else:
338 event.ignore()
339
340 def dropEvent(self, event):
341 """Add local files and web URLS with drag and drop."""
342 filenames = []
343 for url in event.mimeData().urls():
344 path = url.toString()
345 if os.path.isfile(path):
346 filenames.append(path)
347 elif os.path.isdir(path):
348 filenames = filenames + list(glob(os.path.join(path, '*')))
349 else:
350 filenames.append(path)
351 self._add_files(filenames)
352
353
354 def viewbox_key_event(event):
355 """ViewBox key event handler
356 Parameters
357 ----------
358 event : instance of Event
359 The event.
360 """
361 return
362
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_qt/qt_viewer.py b/napari/_qt/qt_viewer.py
--- a/napari/_qt/qt_viewer.py
+++ b/napari/_qt/qt_viewer.py
@@ -13,7 +13,7 @@
QFileDialog,
QSplitter,
)
-from qtpy.QtWidgets import QStackedWidget
+from qtpy.QtWidgets import QStackedWidget, QSizePolicy
from qtpy.QtGui import QCursor, QPixmap
from qtpy import API_NAME
from vispy.scene import SceneCanvas, PanZoomCamera, ArcballCamera
@@ -89,6 +89,7 @@
right_layout.addWidget(self.layers)
right_layout.addWidget(self.buttons)
right.setLayout(right_layout)
+ right.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
left = self.controls
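
Editor's note: for readers unfamiliar with Qt size policies, the effect of the one-line fix above can be reproduced in isolation. The sketch below is illustrative only, not napari code; it assumes qtpy with PyQt5 or PySide2 installed, and the bare `QWidget` stand-ins, the 200 px width, and the window size are placeholders rather than napari's actual canvas or layer-list widgets.

```python
# Minimal sketch (not napari code): a fixed-width right panel beside an
# expanding center widget, mirroring the size-policy change in the patch above.
import sys
from qtpy.QtWidgets import QApplication, QWidget, QHBoxLayout, QSizePolicy

app = QApplication(sys.argv)
window = QWidget()
layout = QHBoxLayout(window)

center = QWidget()  # stand-in for the canvas; absorbs any extra width
center.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

right = QWidget()   # stand-in for the layer panel; keeps a constant width
right.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
right.setFixedWidth(200)  # gives the empty stand-in a concrete width to hold

layout.addWidget(center)
layout.addWidget(right)
window.resize(800, 400)
window.show()
sys.exit(app.exec_())
```

Without the `Fixed` horizontal policy the right panel competes with the canvas for any extra width, which is consistent with the symptom described in the issue.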
| {"golden_diff": "diff --git a/napari/_qt/qt_viewer.py b/napari/_qt/qt_viewer.py\n--- a/napari/_qt/qt_viewer.py\n+++ b/napari/_qt/qt_viewer.py\n@@ -13,7 +13,7 @@\n QFileDialog,\n QSplitter,\n )\n-from qtpy.QtWidgets import QStackedWidget\n+from qtpy.QtWidgets import QStackedWidget, QSizePolicy\n from qtpy.QtGui import QCursor, QPixmap\n from qtpy import API_NAME\n from vispy.scene import SceneCanvas, PanZoomCamera, ArcballCamera\n@@ -89,6 +89,7 @@\n right_layout.addWidget(self.layers)\n right_layout.addWidget(self.buttons)\n right.setLayout(right_layout)\n+ right.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)\n \n left = self.controls\n", "issue": "Canvas splitter no longer expanding when widening the viewer\n## \ud83d\udc1b Bug\r\n\r\nIntroduced by #443 or #453 I presume.\r\n\r\n\n", "before_files": [{"content": "import os.path\nfrom glob import glob\nimport numpy as np\nimport inspect\nfrom pathlib import Path\n\nfrom qtpy.QtCore import QCoreApplication, Qt, QSize\nfrom qtpy.QtWidgets import (\n QWidget,\n QVBoxLayout,\n QHBoxLayout,\n QFrame,\n QFileDialog,\n QSplitter,\n)\nfrom qtpy.QtWidgets import QStackedWidget\nfrom qtpy.QtGui import QCursor, QPixmap\nfrom qtpy import API_NAME\nfrom vispy.scene import SceneCanvas, PanZoomCamera, ArcballCamera\nfrom vispy.app import use_app\n\nfrom .qt_dims import QtDims\nfrom .qt_layerlist import QtLayerList\nfrom ..resources import resources_dir\nfrom ..util.theme import template\nfrom ..util.misc import is_multichannel\nfrom ..util.keybindings import components_to_key_combo\nfrom ..util.io import read\n\nfrom .qt_controls import QtControls\nfrom .qt_layer_buttons import QtLayersButtons\nfrom .qt_console import QtConsole\n\n\n# set vispy application to the appropriate qt backend\nuse_app(API_NAME)\n\n\nclass QtViewer(QSplitter):\n with open(os.path.join(resources_dir, 'stylesheet.qss'), 'r') as f:\n raw_stylesheet = f.read()\n\n def __init__(self, viewer):\n super().__init__()\n\n QCoreApplication.setAttribute(\n Qt.AA_UseStyleSheetPropagationInWidgetStyles, True\n )\n\n self.viewer = viewer\n self.dims = QtDims(self.viewer.dims)\n self.controls = QtControls(self.viewer)\n self.layers = QtLayerList(self.viewer.layers)\n self.buttons = QtLayersButtons(self.viewer)\n self.console = QtConsole({'viewer': self.viewer})\n\n if self.console.shell is not None:\n self.console.style().unpolish(self.console)\n self.console.style().polish(self.console)\n self.console.hide()\n self.buttons.consoleButton.clicked.connect(\n lambda: self._toggle_console()\n )\n else:\n self.buttons.consoleButton.setEnabled(False)\n\n self.canvas = SceneCanvas(keys=None, vsync=True)\n self.canvas.native.setMinimumSize(QSize(200, 200))\n\n self.canvas.connect(self.on_mouse_move)\n self.canvas.connect(self.on_mouse_press)\n self.canvas.connect(self.on_mouse_release)\n self.canvas.connect(self.on_key_press)\n self.canvas.connect(self.on_key_release)\n self.canvas.connect(self.on_draw)\n\n self.view = self.canvas.central_widget.add_view()\n self._update_camera()\n\n center = QWidget()\n center_layout = QVBoxLayout()\n center_layout.setContentsMargins(15, 20, 15, 10)\n center_layout.addWidget(self.canvas.native)\n center_layout.addWidget(self.dims)\n center.setLayout(center_layout)\n\n right = QWidget()\n right_layout = QVBoxLayout()\n right_layout.addWidget(self.layers)\n right_layout.addWidget(self.buttons)\n right.setLayout(right_layout)\n\n left = self.controls\n\n top = QWidget()\n top_layout = QHBoxLayout()\n top_layout.addWidget(left)\n top_layout.addWidget(center)\n 
top_layout.addWidget(right)\n top.setLayout(top_layout)\n\n self.setOrientation(Qt.Vertical)\n self.addWidget(top)\n\n if self.console.shell is not None:\n self.addWidget(self.console)\n\n self._last_visited_dir = str(Path.home())\n\n self._cursors = {\n 'disabled': QCursor(\n QPixmap(':/icons/cursor/cursor_disabled.png').scaled(20, 20)\n ),\n 'cross': Qt.CrossCursor,\n 'forbidden': Qt.ForbiddenCursor,\n 'pointing': Qt.PointingHandCursor,\n 'standard': QCursor(),\n }\n\n self._update_palette(viewer.palette)\n\n self._key_release_generators = {}\n\n self.viewer.events.interactive.connect(self._on_interactive)\n self.viewer.events.cursor.connect(self._on_cursor)\n self.viewer.events.reset_view.connect(self._on_reset_view)\n self.viewer.events.palette.connect(\n lambda event: self._update_palette(event.palette)\n )\n self.viewer.layers.events.reordered.connect(self._update_canvas)\n self.viewer.dims.events.display.connect(\n lambda event: self._update_camera()\n )\n\n self.setAcceptDrops(True)\n\n def _update_camera(self):\n if np.sum(self.viewer.dims.display) == 3:\n # Set a 3D camera\n self.view.camera = ArcballCamera(name=\"ArcballCamera\")\n # flip y-axis to have correct alignment\n self.view.camera.flip = (0, 1, 0)\n min_shape, max_shape = self.viewer._calc_bbox()\n centroid = np.add(max_shape, min_shape) / 2\n size = np.subtract(max_shape, min_shape)\n # Scale the camera to the contents in the scene\n if len(centroid) > 0:\n centroid = centroid[-3:]\n self.view.camera.center = centroid[::-1]\n self.view.camera.scale_factor = 1.5 * np.mean(size[-3:])\n elif np.sum(self.viewer.dims.display) == 2:\n # Set 2D camera\n self.view.camera = PanZoomCamera(aspect=1, name=\"PanZoomCamera\")\n # flip y-axis to have correct alignment\n self.view.camera.flip = (0, 1, 0)\n # Scale the camera to the contents in the scene\n self.view.camera.set_range()\n else:\n raise ValueError(\n \"Invalid display flags set in dimensions {}\".format(\n self.viewer.dims.display\n )\n )\n\n self.view.camera.viewbox_key_event = viewbox_key_event\n # TO DO: Remove\n self.viewer._view = self.view\n\n def screenshot(self):\n \"\"\"Take currently displayed screen and convert to an image array.\n\n Returns\n -------\n image : array\n Numpy array of type ubyte and shape (h, w, 4). 
Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n img = self.canvas.native.grabFramebuffer()\n b = img.constBits()\n h, w, c = img.height(), img.width(), 4\n\n # As vispy doesn't use qtpy we need to reconcile the differences\n # between the `QImage` API for `PySide2` and `PyQt5` on how to convert\n # a QImage to a numpy array.\n if API_NAME == 'PySide2':\n arr = np.array(b).reshape(h, w, c)\n else:\n b.setsize(h * w * c)\n arr = np.frombuffer(b, np.uint8).reshape(h, w, c)\n\n # Format of QImage is ARGB32_Premultiplied, but color channels are\n # reversed.\n arr = arr[:, :, [2, 1, 0, 3]]\n return arr\n\n def _open_images(self):\n \"\"\"Adds image files from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select image(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n self._add_files(filenames)\n\n def _add_files(self, filenames):\n \"\"\"Adds an image layer to the viewer.\n\n Whether the image is multichannel is determined by\n :func:`napari.util.misc.is_multichannel`.\n\n If multiple images are selected, they are stacked along the 0th\n axis.\n\n Parameters\n -------\n filenames : list\n List of filenames to be opened\n \"\"\"\n if len(filenames) > 0:\n image = read(filenames)\n self.viewer.add_image(\n image, multichannel=is_multichannel(image.shape)\n )\n self._last_visited_dir = os.path.dirname(filenames[0])\n\n def _on_interactive(self, event):\n self.view.interactive = self.viewer.interactive\n\n def _on_cursor(self, event):\n cursor = self.viewer.cursor\n size = self.viewer.cursor_size\n if cursor == 'square':\n if size < 10 or size > 300:\n q_cursor = self._cursors['cross']\n else:\n q_cursor = QCursor(\n QPixmap(':/icons/cursor/cursor_square.png').scaledToHeight(\n size\n )\n )\n else:\n q_cursor = self._cursors[cursor]\n self.canvas.native.setCursor(q_cursor)\n\n def _on_reset_view(self, event):\n self.view.camera.rect = event.viewbox\n\n def _update_canvas(self, event):\n \"\"\"Clears draw order and refreshes canvas. 
Usefeul for when layers are\n reoredered.\n \"\"\"\n self.canvas._draw_order.clear()\n self.canvas.update()\n\n def _update_palette(self, palette):\n # template and apply the primary stylesheet\n themed_stylesheet = template(self.raw_stylesheet, **palette)\n self.console.style_sheet = themed_stylesheet\n self.console.syntax_style = palette['syntax_style']\n self.setStyleSheet(themed_stylesheet)\n\n def _toggle_console(self):\n \"\"\"Toggle console visible and not visible.\"\"\"\n self.console.setVisible(not self.console.isVisible())\n self.buttons.consoleButton.setProperty(\n 'expanded', self.console.isVisible()\n )\n self.buttons.consoleButton.style().unpolish(self.buttons.consoleButton)\n self.buttons.consoleButton.style().polish(self.buttons.consoleButton)\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_move(event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_press(event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_release(event)\n\n def on_key_press(self, event):\n \"\"\"Called whenever key pressed in canvas.\n \"\"\"\n if event.native.isAutoRepeat() or event.key is None:\n return\n\n comb = components_to_key_combo(event.key.name, event.modifiers)\n\n layer = self.viewer.active_layer\n\n if layer is not None and comb in layer.keymap:\n parent = layer\n elif comb in self.viewer.keymap:\n parent = self.viewer\n else:\n return\n\n func = parent.keymap[comb]\n gen = func(parent)\n\n if inspect.isgeneratorfunction(func):\n try:\n next(gen)\n except StopIteration: # only one statement\n pass\n else:\n self._key_release_generators[event.key] = gen\n\n def on_key_release(self, event):\n \"\"\"Called whenever key released in canvas.\n \"\"\"\n try:\n next(self._key_release_generators[event.key])\n except (KeyError, StopIteration):\n pass\n\n def on_draw(self, event):\n \"\"\"Called whenever drawn in canvas. 
Called for all layers, not just top\n \"\"\"\n for layer in self.viewer.layers:\n layer.on_draw(event)\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls():\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n \"\"\"Add local files and web URLS with drag and drop.\"\"\"\n filenames = []\n for url in event.mimeData().urls():\n path = url.toString()\n if os.path.isfile(path):\n filenames.append(path)\n elif os.path.isdir(path):\n filenames = filenames + list(glob(os.path.join(path, '*')))\n else:\n filenames.append(path)\n self._add_files(filenames)\n\n\ndef viewbox_key_event(event):\n \"\"\"ViewBox key event handler\n Parameters\n ----------\n event : instance of Event\n The event.\n \"\"\"\n return\n", "path": "napari/_qt/qt_viewer.py"}], "after_files": [{"content": "import os.path\nfrom glob import glob\nimport numpy as np\nimport inspect\nfrom pathlib import Path\n\nfrom qtpy.QtCore import QCoreApplication, Qt, QSize\nfrom qtpy.QtWidgets import (\n QWidget,\n QVBoxLayout,\n QHBoxLayout,\n QFrame,\n QFileDialog,\n QSplitter,\n)\nfrom qtpy.QtWidgets import QStackedWidget, QSizePolicy\nfrom qtpy.QtGui import QCursor, QPixmap\nfrom qtpy import API_NAME\nfrom vispy.scene import SceneCanvas, PanZoomCamera, ArcballCamera\nfrom vispy.app import use_app\n\nfrom .qt_dims import QtDims\nfrom .qt_layerlist import QtLayerList\nfrom ..resources import resources_dir\nfrom ..util.theme import template\nfrom ..util.misc import is_multichannel\nfrom ..util.keybindings import components_to_key_combo\nfrom ..util.io import read\n\nfrom .qt_controls import QtControls\nfrom .qt_layer_buttons import QtLayersButtons\nfrom .qt_console import QtConsole\n\n\n# set vispy application to the appropriate qt backend\nuse_app(API_NAME)\n\n\nclass QtViewer(QSplitter):\n with open(os.path.join(resources_dir, 'stylesheet.qss'), 'r') as f:\n raw_stylesheet = f.read()\n\n def __init__(self, viewer):\n super().__init__()\n\n QCoreApplication.setAttribute(\n Qt.AA_UseStyleSheetPropagationInWidgetStyles, True\n )\n\n self.viewer = viewer\n self.dims = QtDims(self.viewer.dims)\n self.controls = QtControls(self.viewer)\n self.layers = QtLayerList(self.viewer.layers)\n self.buttons = QtLayersButtons(self.viewer)\n self.console = QtConsole({'viewer': self.viewer})\n\n if self.console.shell is not None:\n self.console.style().unpolish(self.console)\n self.console.style().polish(self.console)\n self.console.hide()\n self.buttons.consoleButton.clicked.connect(\n lambda: self._toggle_console()\n )\n else:\n self.buttons.consoleButton.setEnabled(False)\n\n self.canvas = SceneCanvas(keys=None, vsync=True)\n self.canvas.native.setMinimumSize(QSize(200, 200))\n\n self.canvas.connect(self.on_mouse_move)\n self.canvas.connect(self.on_mouse_press)\n self.canvas.connect(self.on_mouse_release)\n self.canvas.connect(self.on_key_press)\n self.canvas.connect(self.on_key_release)\n self.canvas.connect(self.on_draw)\n\n self.view = self.canvas.central_widget.add_view()\n self._update_camera()\n\n center = QWidget()\n center_layout = QVBoxLayout()\n center_layout.setContentsMargins(15, 20, 15, 10)\n center_layout.addWidget(self.canvas.native)\n center_layout.addWidget(self.dims)\n center.setLayout(center_layout)\n\n right = QWidget()\n right_layout = QVBoxLayout()\n right_layout.addWidget(self.layers)\n right_layout.addWidget(self.buttons)\n right.setLayout(right_layout)\n right.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)\n\n left = self.controls\n\n top = QWidget()\n top_layout = QHBoxLayout()\n 
top_layout.addWidget(left)\n top_layout.addWidget(center)\n top_layout.addWidget(right)\n top.setLayout(top_layout)\n\n self.setOrientation(Qt.Vertical)\n self.addWidget(top)\n\n if self.console.shell is not None:\n self.addWidget(self.console)\n\n self._last_visited_dir = str(Path.home())\n\n self._cursors = {\n 'disabled': QCursor(\n QPixmap(':/icons/cursor/cursor_disabled.png').scaled(20, 20)\n ),\n 'cross': Qt.CrossCursor,\n 'forbidden': Qt.ForbiddenCursor,\n 'pointing': Qt.PointingHandCursor,\n 'standard': QCursor(),\n }\n\n self._update_palette(viewer.palette)\n\n self._key_release_generators = {}\n\n self.viewer.events.interactive.connect(self._on_interactive)\n self.viewer.events.cursor.connect(self._on_cursor)\n self.viewer.events.reset_view.connect(self._on_reset_view)\n self.viewer.events.palette.connect(\n lambda event: self._update_palette(event.palette)\n )\n self.viewer.layers.events.reordered.connect(self._update_canvas)\n self.viewer.dims.events.display.connect(\n lambda event: self._update_camera()\n )\n\n self.setAcceptDrops(True)\n\n def _update_camera(self):\n if np.sum(self.viewer.dims.display) == 3:\n # Set a 3D camera\n self.view.camera = ArcballCamera(name=\"ArcballCamera\")\n # flip y-axis to have correct alignment\n self.view.camera.flip = (0, 1, 0)\n min_shape, max_shape = self.viewer._calc_bbox()\n centroid = np.add(max_shape, min_shape) / 2\n size = np.subtract(max_shape, min_shape)\n # Scale the camera to the contents in the scene\n if len(centroid) > 0:\n centroid = centroid[-3:]\n self.view.camera.center = centroid[::-1]\n self.view.camera.scale_factor = 1.5 * np.mean(size[-3:])\n elif np.sum(self.viewer.dims.display) == 2:\n # Set 2D camera\n self.view.camera = PanZoomCamera(aspect=1, name=\"PanZoomCamera\")\n # flip y-axis to have correct alignment\n self.view.camera.flip = (0, 1, 0)\n # Scale the camera to the contents in the scene\n self.view.camera.set_range()\n else:\n raise ValueError(\n \"Invalid display flags set in dimensions {}\".format(\n self.viewer.dims.display\n )\n )\n\n self.view.camera.viewbox_key_event = viewbox_key_event\n # TO DO: Remove\n self.viewer._view = self.view\n\n def screenshot(self):\n \"\"\"Take currently displayed screen and convert to an image array.\n\n Returns\n -------\n image : array\n Numpy array of type ubyte and shape (h, w, 4). 
Index [0, 0] is the\n upper-left corner of the rendered region.\n \"\"\"\n img = self.canvas.native.grabFramebuffer()\n b = img.constBits()\n h, w, c = img.height(), img.width(), 4\n\n # As vispy doesn't use qtpy we need to reconcile the differences\n # between the `QImage` API for `PySide2` and `PyQt5` on how to convert\n # a QImage to a numpy array.\n if API_NAME == 'PySide2':\n arr = np.array(b).reshape(h, w, c)\n else:\n b.setsize(h * w * c)\n arr = np.frombuffer(b, np.uint8).reshape(h, w, c)\n\n # Format of QImage is ARGB32_Premultiplied, but color channels are\n # reversed.\n arr = arr[:, :, [2, 1, 0, 3]]\n return arr\n\n def _open_images(self):\n \"\"\"Adds image files from the menubar.\"\"\"\n filenames, _ = QFileDialog.getOpenFileNames(\n parent=self,\n caption='Select image(s)...',\n directory=self._last_visited_dir, # home dir by default\n )\n self._add_files(filenames)\n\n def _add_files(self, filenames):\n \"\"\"Adds an image layer to the viewer.\n\n Whether the image is multichannel is determined by\n :func:`napari.util.misc.is_multichannel`.\n\n If multiple images are selected, they are stacked along the 0th\n axis.\n\n Parameters\n -------\n filenames : list\n List of filenames to be opened\n \"\"\"\n if len(filenames) > 0:\n image = read(filenames)\n self.viewer.add_image(\n image, multichannel=is_multichannel(image.shape)\n )\n self._last_visited_dir = os.path.dirname(filenames[0])\n\n def _on_interactive(self, event):\n self.view.interactive = self.viewer.interactive\n\n def _on_cursor(self, event):\n cursor = self.viewer.cursor\n size = self.viewer.cursor_size\n if cursor == 'square':\n if size < 10 or size > 300:\n q_cursor = self._cursors['cross']\n else:\n q_cursor = QCursor(\n QPixmap(':/icons/cursor/cursor_square.png').scaledToHeight(\n size\n )\n )\n else:\n q_cursor = self._cursors[cursor]\n self.canvas.native.setCursor(q_cursor)\n\n def _on_reset_view(self, event):\n self.view.camera.rect = event.viewbox\n\n def _update_canvas(self, event):\n \"\"\"Clears draw order and refreshes canvas. 
Usefeul for when layers are\n reoredered.\n \"\"\"\n self.canvas._draw_order.clear()\n self.canvas.update()\n\n def _update_palette(self, palette):\n # template and apply the primary stylesheet\n themed_stylesheet = template(self.raw_stylesheet, **palette)\n self.console.style_sheet = themed_stylesheet\n self.console.syntax_style = palette['syntax_style']\n self.setStyleSheet(themed_stylesheet)\n\n def _toggle_console(self):\n \"\"\"Toggle console visible and not visible.\"\"\"\n self.console.setVisible(not self.console.isVisible())\n self.buttons.consoleButton.setProperty(\n 'expanded', self.console.isVisible()\n )\n self.buttons.consoleButton.style().unpolish(self.buttons.consoleButton)\n self.buttons.consoleButton.style().polish(self.buttons.consoleButton)\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_move(event)\n\n def on_mouse_press(self, event):\n \"\"\"Called whenever mouse pressed in canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_press(event)\n\n def on_mouse_release(self, event):\n \"\"\"Called whenever mouse released in canvas.\n \"\"\"\n layer = self.viewer.active_layer\n if layer is not None:\n layer.on_mouse_release(event)\n\n def on_key_press(self, event):\n \"\"\"Called whenever key pressed in canvas.\n \"\"\"\n if event.native.isAutoRepeat() or event.key is None:\n return\n\n comb = components_to_key_combo(event.key.name, event.modifiers)\n\n layer = self.viewer.active_layer\n\n if layer is not None and comb in layer.keymap:\n parent = layer\n elif comb in self.viewer.keymap:\n parent = self.viewer\n else:\n return\n\n func = parent.keymap[comb]\n gen = func(parent)\n\n if inspect.isgeneratorfunction(func):\n try:\n next(gen)\n except StopIteration: # only one statement\n pass\n else:\n self._key_release_generators[event.key] = gen\n\n def on_key_release(self, event):\n \"\"\"Called whenever key released in canvas.\n \"\"\"\n try:\n next(self._key_release_generators[event.key])\n except (KeyError, StopIteration):\n pass\n\n def on_draw(self, event):\n \"\"\"Called whenever drawn in canvas. Called for all layers, not just top\n \"\"\"\n for layer in self.viewer.layers:\n layer.on_draw(event)\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls():\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n \"\"\"Add local files and web URLS with drag and drop.\"\"\"\n filenames = []\n for url in event.mimeData().urls():\n path = url.toString()\n if os.path.isfile(path):\n filenames.append(path)\n elif os.path.isdir(path):\n filenames = filenames + list(glob(os.path.join(path, '*')))\n else:\n filenames.append(path)\n self._add_files(filenames)\n\n\ndef viewbox_key_event(event):\n \"\"\"ViewBox key event handler\n Parameters\n ----------\n event : instance of Event\n The event.\n \"\"\"\n return\n", "path": "napari/_qt/qt_viewer.py"}]} | 3,889 | 183 |
gh_patches_debug_948 | rasdani/github-patches | git_diff | deis__deis-280 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update chef_version in provisioning scripts
I see in the DigitalOcean support that @bacongobbler removed the --bootstrap-version=11.4.4 flag and things still seem to work with a more current Chef (11.6.2). This wasn't the case before (the apt cookbook failed), so we had pinned it at a working version.
Let's retest that we're compatible with Chef 11.6.x and then remove --bootstrap-version from the provisioning scripts if so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cm/chef.py`
Content:
```
1 """
2 Deis configuration management implementation for Opscode Chef.
3 """
4
5 from __future__ import unicode_literals
6
7 import os
8 import re
9 import subprocess
10 import tempfile
11 import time
12 import socket
13
14 from celery.canvas import group
15
16 from api.ssh import exec_ssh, connect_ssh
17 from cm.chef_api import ChefAPI
18
19
20 CHEF_CONFIG_PATH = '/etc/chef'
21 CHEF_INSTALL_TYPE = 'gems'
22 CHEF_RUBY_VERSION = '1.9.1'
23 CHEF_ENVIRONMENT = '_default'
24 CHEF_CLIENT_VERSION = '11.4.4'
25
26 # load chef config using CHEF_CONFIG_PATH
27 try:
28 # parse controller's chef config for server_url and client_name
29 _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')
30 if not os.path.exists(_client_cfg_path):
31 raise EnvironmentError('Could not find {}'.format(_client_cfg_path))
32 with open(_client_cfg_path) as f:
33 _data = f.read()
34 # construct a dict from the ruby client.rb
35 _d = {}
36 for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \t]+(.*)$''',
37 _data, re.MULTILINE):
38 _d[m[0]] = m[1].strip("'").strip('"')
39 # set global variables from client.rb
40 CHEF_SERVER_URL = _d['chef_server_url']
41 CHEF_NODE_NAME = _d.get('node_name', socket.gethostname())
42 CHEF_CLIENT_NAME = _d.get('node_name', socket.gethostname())
43 CHEF_VALIDATION_NAME = _d['validation_client_name']
44 # read the client key
45 _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')
46 CHEF_CLIENT_KEY = subprocess.check_output(
47 ['sudo', '/bin/cat', _client_pem_path]).strip('\n')
48 # read the validation key
49 _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')
50 CHEF_VALIDATION_KEY = subprocess.check_output(
51 ['sudo', '/bin/cat', _valid_pem_path]).strip('\n')
52 except Exception as err:
53 msg = "Failed to auto-configure Chef -- {}".format(err)
54 if os.environ.get('READTHEDOCS'):
55 # Just print the error if Sphinx is running
56 print(msg)
57 else:
58 raise EnvironmentError(msg)
59
60
61 def _get_client():
62 """
63 Return a new instance of a Chef API Client
64
65 :rtype: a :class:`~cm.chef_api.ChefAPI` object
66 """
67 return ChefAPI(CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)
68
69
70 def bootstrap_node(node):
71 """
72 Bootstrap the Chef configuration management tools onto a node.
73
74 :param node: a dict containing the node's fully-qualified domain name and SSH info
75 :raises: RuntimeError
76 """
77 # block until we can connect over ssh
78 ssh = connect_ssh(node['ssh_username'], node['fqdn'], node.get('ssh_port', 22),
79 node['ssh_private_key'], timeout=120)
80 # block until ubuntu cloud-init is finished
81 initializing = True
82 while initializing:
83 time.sleep(10)
84 initializing, _rc = exec_ssh(ssh, 'ps auxw | egrep "cloud-init" | grep -v egrep')
85 # write out private key and prepare to `knife bootstrap`
86 try:
87 _, pk_path = tempfile.mkstemp()
88 _, output_path = tempfile.mkstemp()
89 with open(pk_path, 'w') as f:
90 f.write(node['ssh_private_key'])
91 # build knife bootstrap command
92 args = ['knife', 'bootstrap', node['fqdn']]
93 args.extend(['--identity-file', pk_path])
94 args.extend(['--node-name', node['id']])
95 args.extend(['--sudo', '--ssh-user', node['ssh_username']])
96 args.extend(['--ssh-port', str(node.get('ssh_port', 22))])
97 args.extend(['--bootstrap-version', CHEF_CLIENT_VERSION])
98 args.extend(['--no-host-key-verify'])
99 args.extend(['--run-list', _construct_run_list(node)])
100 print(' '.join(args))
101 # tee the command's output to a tempfile
102 args.extend(['|', 'tee', output_path])
103 # TODO: figure out why home isn't being set correctly for knife exec
104 env = os.environ.copy()
105 env['HOME'] = '/opt/deis'
106 # execute knife bootstrap
107 p = subprocess.Popen(' '.join(args), env=env, shell=True)
108 rc = p.wait()
109 # always print knife output
110 with open(output_path) as f:
111 output = f.read()
112 print(output)
113 # raise an exception if bootstrap failed
114 if rc != 0:
115 raise RuntimeError('Node Bootstrap Error')
116 # remove temp files from filesystem
117 finally:
118 os.remove(pk_path)
119 os.remove(output_path)
120
121
122 def _construct_run_list(node):
123 config = node['config']
124 # if run_list override specified, use it (assumes csv)
125 run_list = config.get('run_list', [])
126 # otherwise construct a run_list using proxy/runtime flags
127 if not run_list:
128 run_list = ['recipe[deis]']
129 if node.get('runtime') is True:
130 run_list.append('recipe[deis::runtime]')
131 if node.get('proxy') is True:
132 run_list.append('recipe[deis::proxy]')
133 return ','.join(run_list)
134
135
136 def purge_node(node):
137 """
138 Purge a node and its client from Chef configuration management.
139
140 :param node: a dict containing the id of a node to purge
141 """
142 client = _get_client()
143 client.delete_node(node['id'])
144 client.delete_client(node['id'])
145
146
147 def converge_controller():
148 """
149 Converge this controller node.
150
151 "Converge" means to change a node's configuration to match that defined by
152 configuration management.
153
154 :returns: the output of the convergence command, in this case `sudo chef-client`
155 """
156 try:
157 return subprocess.check_output(['sudo', 'chef-client'])
158 except subprocess.CalledProcessError as err:
159 print(err)
160 print(err.output)
161 raise err
162
163
164 def converge_node(node):
165 """
166 Converge a node.
167
168 "Converge" means to change a node's configuration to match that defined by
169 configuration management.
170
171 :param node: a dict containing the node's fully-qualified domain name and SSH info
172 :returns: a tuple of the convergence command's (output, return_code)
173 """
174 ssh = connect_ssh(node['ssh_username'],
175 node['fqdn'], 22,
176 node['ssh_private_key'])
177 output, rc = exec_ssh(ssh, 'sudo chef-client')
178 print(output)
179 if rc != 0:
180 e = RuntimeError('Node converge error')
181 e.output = output
182 raise e
183 return output, rc
184
185
186 def run_node(node, command):
187 """
188 Run a command on a node.
189
190 :param node: a dict containing the node's fully-qualified domain name and SSH info
191 :param command: the command-line to execute on the node
192 :returns: a tuple of the command's (output, return_code)
193 """
194 ssh = connect_ssh(node['ssh_username'], node['fqdn'],
195 node['ssh_port'], node['ssh_private_key'])
196 output, rc = exec_ssh(ssh, command, pty=True)
197 return output, rc
198
199
200 def converge_formation(formation):
201 """
202 Converge all nodes in a formation.
203
204 "Converge" means to change a node's configuration to match that defined by
205 configuration management.
206
207 :param formation: a :class:`~api.models.Formation` to converge
208 :returns: the combined output of the nodes' convergence commands
209 """
210 nodes = formation.node_set.all()
211 subtasks = []
212 for n in nodes:
213 subtask = converge_node.s(n.id,
214 n.layer.flavor.ssh_username,
215 n.fqdn,
216 n.layer.flavor.ssh_private_key)
217 subtasks.append(subtask)
218 job = group(*subtasks)
219 return job.apply_async().join()
220
221
222 def publish_user(user, data):
223 """
224 Publish a user to configuration management.
225
226 :param user: a dict containing the username
227 :param data: data to store with the user
228 :returns: a tuple of (body, status) from the underlying HTTP response
229 :raises: RuntimeError
230 """
231 _publish('deis-users', user['username'], data)
232
233
234 def publish_app(app, data):
235 """
236 Publish an app to configuration management.
237
238 :param app: a dict containing the id of the app
239 :param data: data to store with the app
240 :returns: a tuple of (body, status) from the underlying HTTP response
241 :raises: RuntimeError
242 """
243 _publish('deis-apps', app['id'], data)
244
245
246 def purge_app(app):
247 """
248 Purge an app from configuration management.
249
250 :param app: a dict containing the id of the app
251 :returns: a tuple of (body, status) from the underlying HTTP response
252 :raises: RuntimeError
253 """
254 _purge('deis-apps', app['id'])
255
256
257 def publish_formation(formation, data):
258 """
259 Publish a formation to configuration management.
260
261 :param formation: a dict containing the id of the formation
262 :param data: data to store with the formation
263 :returns: a tuple of (body, status) from the underlying HTTP response
264 :raises: RuntimeError
265 """
266 _publish('deis-formations', formation['id'], data)
267
268
269 def purge_formation(formation):
270 """
271 Purge a formation from configuration management.
272
273 :param formation: a dict containing the id of the formation
274 :returns: a tuple of (body, status) from the underlying HTTP response
275 :raises: RuntimeError
276 """
277 _purge('deis-formations', formation['id'])
278
279
280 def _publish(data_bag, item_name, item_value):
281 """
282 Publish a data bag item to the Chef server.
283
284 :param data_bag: the name of a Chef data bag
285 :param item_name: the name of the item to publish
286 :param item_value: the value of the item to publish
287 :returns: a tuple of (body, status) from the underlying HTTP response
288 :raises: RuntimeError
289 """
290 client = _get_client()
291 body, status = client.update_databag_item(data_bag, item_name, item_value)
292 if status != 200:
293 body, status = client.create_databag_item(data_bag, item_name, item_value)
294 if status != 201:
295 raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))
296 return body, status
297
298
299 def _purge(databag_name, item_name):
300 """
301 Purge a data bag item from the Chef server.
302
303 :param databag_name: the name of a Chef data bag
304 :param item_name: the name of the item to purge
305 :returns: a tuple of (body, status) from the underlying HTTP response
306 :raises: RuntimeError
307 """
308 client = _get_client()
309 body, status = client.delete_databag_item(databag_name, item_name)
310 if status == 200 or status == 404:
311 return body, status
312 raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))
313
```
--- END FILES ---
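[Editor's note] For readers skimming the module above: every publish/purge helper funnels through `_publish` and `_purge`, which try an update first, fall back to creating the data bag item, and treat a 404 on delete as success. A hedged usage sketch, with the dict shapes inferred from the docstrings rather than taken from the Deis codebase:

```python
# Illustrative usage of the helpers defined above; payload contents are assumptions.
app = {'id': 'example-app'}
user = {'username': 'alice'}

publish_user(user, {'ssh_keys': []})     # update-or-create in the 'deis-users' data bag
publish_app(app, {'formation': 'web'})   # update-or-create in the 'deis-apps' data bag
purge_app(app)                           # delete; a missing item (404) is not an error
```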
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cm/chef.py b/cm/chef.py
--- a/cm/chef.py
+++ b/cm/chef.py
@@ -21,7 +21,7 @@
CHEF_INSTALL_TYPE = 'gems'
CHEF_RUBY_VERSION = '1.9.1'
CHEF_ENVIRONMENT = '_default'
-CHEF_CLIENT_VERSION = '11.4.4'
+CHEF_CLIENT_VERSION = '11.6.2'
# load chef config using CHEF_CONFIG_PATH
try:
| {"golden_diff": "diff --git a/cm/chef.py b/cm/chef.py\n--- a/cm/chef.py\n+++ b/cm/chef.py\n@@ -21,7 +21,7 @@\n CHEF_INSTALL_TYPE = 'gems'\n CHEF_RUBY_VERSION = '1.9.1'\n CHEF_ENVIRONMENT = '_default'\n-CHEF_CLIENT_VERSION = '11.4.4'\n+CHEF_CLIENT_VERSION = '11.6.2'\n \n # load chef config using CHEF_CONFIG_PATH\n try:\n", "issue": "Update chef_version in provisioning scripts\nI see in the digitalocean support that @bacongobbler removed the --bootstrap-version=11.4.4 and things still seem to work with more current Chef (11.6.2). This wasn't the case before--the apt cookbook failed--so we had pinned it at a working version.\n\nLet's retest that we're compatible with Chef 11.6.x and then remove --bootstrap-version from the provisioning scripts if so.\n\n", "before_files": [{"content": "\"\"\"\nDeis configuration management implementation for Opscode Chef.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport time\nimport socket\n\nfrom celery.canvas import group\n\nfrom api.ssh import exec_ssh, connect_ssh\nfrom cm.chef_api import ChefAPI\n\n\nCHEF_CONFIG_PATH = '/etc/chef'\nCHEF_INSTALL_TYPE = 'gems'\nCHEF_RUBY_VERSION = '1.9.1'\nCHEF_ENVIRONMENT = '_default'\nCHEF_CLIENT_VERSION = '11.4.4'\n\n# load chef config using CHEF_CONFIG_PATH\ntry:\n # parse controller's chef config for server_url and client_name\n _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')\n if not os.path.exists(_client_cfg_path):\n raise EnvironmentError('Could not find {}'.format(_client_cfg_path))\n with open(_client_cfg_path) as f:\n _data = f.read()\n # construct a dict from the ruby client.rb\n _d = {}\n for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \\t]+(.*)$''',\n _data, re.MULTILINE):\n _d[m[0]] = m[1].strip(\"'\").strip('\"')\n # set global variables from client.rb\n CHEF_SERVER_URL = _d['chef_server_url']\n CHEF_NODE_NAME = _d.get('node_name', socket.gethostname())\n CHEF_CLIENT_NAME = _d.get('node_name', socket.gethostname())\n CHEF_VALIDATION_NAME = _d['validation_client_name']\n # read the client key\n _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')\n CHEF_CLIENT_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _client_pem_path]).strip('\\n')\n # read the validation key\n _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')\n CHEF_VALIDATION_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _valid_pem_path]).strip('\\n')\nexcept Exception as err:\n msg = \"Failed to auto-configure Chef -- {}\".format(err)\n if os.environ.get('READTHEDOCS'):\n # Just print the error if Sphinx is running\n print(msg)\n else:\n raise EnvironmentError(msg)\n\n\ndef _get_client():\n \"\"\"\n Return a new instance of a Chef API Client\n\n :rtype: a :class:`~cm.chef_api.ChefAPI` object\n \"\"\"\n return ChefAPI(CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)\n\n\ndef bootstrap_node(node):\n \"\"\"\n Bootstrap the Chef configuration management tools onto a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :raises: RuntimeError\n \"\"\"\n # block until we can connect over ssh\n ssh = connect_ssh(node['ssh_username'], node['fqdn'], node.get('ssh_port', 22),\n node['ssh_private_key'], timeout=120)\n # block until ubuntu cloud-init is finished\n initializing = True\n while initializing:\n time.sleep(10)\n initializing, _rc = exec_ssh(ssh, 'ps auxw | egrep \"cloud-init\" | grep -v egrep')\n # write out private key and prepare to `knife bootstrap`\n try:\n _, pk_path = 
tempfile.mkstemp()\n _, output_path = tempfile.mkstemp()\n with open(pk_path, 'w') as f:\n f.write(node['ssh_private_key'])\n # build knife bootstrap command\n args = ['knife', 'bootstrap', node['fqdn']]\n args.extend(['--identity-file', pk_path])\n args.extend(['--node-name', node['id']])\n args.extend(['--sudo', '--ssh-user', node['ssh_username']])\n args.extend(['--ssh-port', str(node.get('ssh_port', 22))])\n args.extend(['--bootstrap-version', CHEF_CLIENT_VERSION])\n args.extend(['--no-host-key-verify'])\n args.extend(['--run-list', _construct_run_list(node)])\n print(' '.join(args))\n # tee the command's output to a tempfile\n args.extend(['|', 'tee', output_path])\n # TODO: figure out why home isn't being set correctly for knife exec\n env = os.environ.copy()\n env['HOME'] = '/opt/deis'\n # execute knife bootstrap\n p = subprocess.Popen(' '.join(args), env=env, shell=True)\n rc = p.wait()\n # always print knife output\n with open(output_path) as f:\n output = f.read()\n print(output)\n # raise an exception if bootstrap failed\n if rc != 0:\n raise RuntimeError('Node Bootstrap Error')\n # remove temp files from filesystem\n finally:\n os.remove(pk_path)\n os.remove(output_path)\n\n\ndef _construct_run_list(node):\n config = node['config']\n # if run_list override specified, use it (assumes csv)\n run_list = config.get('run_list', [])\n # otherwise construct a run_list using proxy/runtime flags\n if not run_list:\n run_list = ['recipe[deis]']\n if node.get('runtime') is True:\n run_list.append('recipe[deis::runtime]')\n if node.get('proxy') is True:\n run_list.append('recipe[deis::proxy]')\n return ','.join(run_list)\n\n\ndef purge_node(node):\n \"\"\"\n Purge a node and its client from Chef configuration management.\n\n :param node: a dict containing the id of a node to purge\n \"\"\"\n client = _get_client()\n client.delete_node(node['id'])\n client.delete_client(node['id'])\n\n\ndef converge_controller():\n \"\"\"\n Converge this controller node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :returns: the output of the convergence command, in this case `sudo chef-client`\n \"\"\"\n try:\n return subprocess.check_output(['sudo', 'chef-client'])\n except subprocess.CalledProcessError as err:\n print(err)\n print(err.output)\n raise err\n\n\ndef converge_node(node):\n \"\"\"\n Converge a node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :returns: a tuple of the convergence command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'],\n node['fqdn'], 22,\n node['ssh_private_key'])\n output, rc = exec_ssh(ssh, 'sudo chef-client')\n print(output)\n if rc != 0:\n e = RuntimeError('Node converge error')\n e.output = output\n raise e\n return output, rc\n\n\ndef run_node(node, command):\n \"\"\"\n Run a command on a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :param command: the command-line to execute on the node\n :returns: a tuple of the command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'], node['fqdn'],\n node['ssh_port'], node['ssh_private_key'])\n output, rc = exec_ssh(ssh, command, pty=True)\n return output, rc\n\n\ndef converge_formation(formation):\n \"\"\"\n Converge all nodes in a formation.\n\n \"Converge\" means to change a node's configuration to match that 
defined by\n configuration management.\n\n :param formation: a :class:`~api.models.Formation` to converge\n :returns: the combined output of the nodes' convergence commands\n \"\"\"\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.s(n.id,\n n.layer.flavor.ssh_username,\n n.fqdn,\n n.layer.flavor.ssh_private_key)\n subtasks.append(subtask)\n job = group(*subtasks)\n return job.apply_async().join()\n\n\ndef publish_user(user, data):\n \"\"\"\n Publish a user to configuration management.\n\n :param user: a dict containing the username\n :param data: data to store with the user\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-users', user['username'], data)\n\n\ndef publish_app(app, data):\n \"\"\"\n Publish an app to configuration management.\n\n :param app: a dict containing the id of the app\n :param data: data to store with the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-apps', app['id'], data)\n\n\ndef purge_app(app):\n \"\"\"\n Purge an app from configuration management.\n\n :param app: a dict containing the id of the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-apps', app['id'])\n\n\ndef publish_formation(formation, data):\n \"\"\"\n Publish a formation to configuration management.\n\n :param formation: a dict containing the id of the formation\n :param data: data to store with the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-formations', formation['id'], data)\n\n\ndef purge_formation(formation):\n \"\"\"\n Purge a formation from configuration management.\n\n :param formation: a dict containing the id of the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-formations', formation['id'])\n\n\ndef _publish(data_bag, item_name, item_value):\n \"\"\"\n Publish a data bag item to the Chef server.\n\n :param data_bag: the name of a Chef data bag\n :param item_name: the name of the item to publish\n :param item_value: the value of the item to publish\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.update_databag_item(data_bag, item_name, item_value)\n if status != 200:\n body, status = client.create_databag_item(data_bag, item_name, item_value)\n if status != 201:\n raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))\n return body, status\n\n\ndef _purge(databag_name, item_name):\n \"\"\"\n Purge a data bag item from the Chef server.\n\n :param databag_name: the name of a Chef data bag\n :param item_name: the name of the item to purge\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.delete_databag_item(databag_name, item_name)\n if status == 200 or status == 404:\n return body, status\n raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))\n", "path": "cm/chef.py"}], "after_files": [{"content": "\"\"\"\nDeis configuration management implementation for Opscode Chef.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport subprocess\nimport tempfile\nimport time\nimport 
socket\n\nfrom celery.canvas import group\n\nfrom api.ssh import exec_ssh, connect_ssh\nfrom cm.chef_api import ChefAPI\n\n\nCHEF_CONFIG_PATH = '/etc/chef'\nCHEF_INSTALL_TYPE = 'gems'\nCHEF_RUBY_VERSION = '1.9.1'\nCHEF_ENVIRONMENT = '_default'\nCHEF_CLIENT_VERSION = '11.6.2'\n\n# load chef config using CHEF_CONFIG_PATH\ntry:\n # parse controller's chef config for server_url and client_name\n _client_cfg_path = os.path.join(CHEF_CONFIG_PATH, 'client.rb')\n if not os.path.exists(_client_cfg_path):\n raise EnvironmentError('Could not find {}'.format(_client_cfg_path))\n with open(_client_cfg_path) as f:\n _data = f.read()\n # construct a dict from the ruby client.rb\n _d = {}\n for m in re.findall(r'''^([a-zA-Z0-9_]+)[ \\t]+(.*)$''',\n _data, re.MULTILINE):\n _d[m[0]] = m[1].strip(\"'\").strip('\"')\n # set global variables from client.rb\n CHEF_SERVER_URL = _d['chef_server_url']\n CHEF_NODE_NAME = _d.get('node_name', socket.gethostname())\n CHEF_CLIENT_NAME = _d.get('node_name', socket.gethostname())\n CHEF_VALIDATION_NAME = _d['validation_client_name']\n # read the client key\n _client_pem_path = os.path.join(CHEF_CONFIG_PATH, 'client.pem')\n CHEF_CLIENT_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _client_pem_path]).strip('\\n')\n # read the validation key\n _valid_pem_path = os.path.join(CHEF_CONFIG_PATH, 'validation.pem')\n CHEF_VALIDATION_KEY = subprocess.check_output(\n ['sudo', '/bin/cat', _valid_pem_path]).strip('\\n')\nexcept Exception as err:\n msg = \"Failed to auto-configure Chef -- {}\".format(err)\n if os.environ.get('READTHEDOCS'):\n # Just print the error if Sphinx is running\n print(msg)\n else:\n raise EnvironmentError(msg)\n\n\ndef _get_client():\n \"\"\"\n Return a new instance of a Chef API Client\n\n :rtype: a :class:`~cm.chef_api.ChefAPI` object\n \"\"\"\n return ChefAPI(CHEF_SERVER_URL, CHEF_CLIENT_NAME, CHEF_CLIENT_KEY)\n\n\ndef bootstrap_node(node):\n \"\"\"\n Bootstrap the Chef configuration management tools onto a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :raises: RuntimeError\n \"\"\"\n # block until we can connect over ssh\n ssh = connect_ssh(node['ssh_username'], node['fqdn'], node.get('ssh_port', 22),\n node['ssh_private_key'], timeout=120)\n # block until ubuntu cloud-init is finished\n initializing = True\n while initializing:\n time.sleep(10)\n initializing, _rc = exec_ssh(ssh, 'ps auxw | egrep \"cloud-init\" | grep -v egrep')\n # write out private key and prepare to `knife bootstrap`\n try:\n _, pk_path = tempfile.mkstemp()\n _, output_path = tempfile.mkstemp()\n with open(pk_path, 'w') as f:\n f.write(node['ssh_private_key'])\n # build knife bootstrap command\n args = ['knife', 'bootstrap', node['fqdn']]\n args.extend(['--identity-file', pk_path])\n args.extend(['--node-name', node['id']])\n args.extend(['--sudo', '--ssh-user', node['ssh_username']])\n args.extend(['--ssh-port', str(node.get('ssh_port', 22))])\n args.extend(['--bootstrap-version', CHEF_CLIENT_VERSION])\n args.extend(['--no-host-key-verify'])\n args.extend(['--run-list', _construct_run_list(node)])\n print(' '.join(args))\n # tee the command's output to a tempfile\n args.extend(['|', 'tee', output_path])\n # TODO: figure out why home isn't being set correctly for knife exec\n env = os.environ.copy()\n env['HOME'] = '/opt/deis'\n # execute knife bootstrap\n p = subprocess.Popen(' '.join(args), env=env, shell=True)\n rc = p.wait()\n # always print knife output\n with open(output_path) as f:\n output = f.read()\n print(output)\n # 
raise an exception if bootstrap failed\n if rc != 0:\n raise RuntimeError('Node Bootstrap Error')\n # remove temp files from filesystem\n finally:\n os.remove(pk_path)\n os.remove(output_path)\n\n\ndef _construct_run_list(node):\n config = node['config']\n # if run_list override specified, use it (assumes csv)\n run_list = config.get('run_list', [])\n # otherwise construct a run_list using proxy/runtime flags\n if not run_list:\n run_list = ['recipe[deis]']\n if node.get('runtime') is True:\n run_list.append('recipe[deis::runtime]')\n if node.get('proxy') is True:\n run_list.append('recipe[deis::proxy]')\n return ','.join(run_list)\n\n\ndef purge_node(node):\n \"\"\"\n Purge a node and its client from Chef configuration management.\n\n :param node: a dict containing the id of a node to purge\n \"\"\"\n client = _get_client()\n client.delete_node(node['id'])\n client.delete_client(node['id'])\n\n\ndef converge_controller():\n \"\"\"\n Converge this controller node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :returns: the output of the convergence command, in this case `sudo chef-client`\n \"\"\"\n try:\n return subprocess.check_output(['sudo', 'chef-client'])\n except subprocess.CalledProcessError as err:\n print(err)\n print(err.output)\n raise err\n\n\ndef converge_node(node):\n \"\"\"\n Converge a node.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :returns: a tuple of the convergence command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'],\n node['fqdn'], 22,\n node['ssh_private_key'])\n output, rc = exec_ssh(ssh, 'sudo chef-client')\n print(output)\n if rc != 0:\n e = RuntimeError('Node converge error')\n e.output = output\n raise e\n return output, rc\n\n\ndef run_node(node, command):\n \"\"\"\n Run a command on a node.\n\n :param node: a dict containing the node's fully-qualified domain name and SSH info\n :param command: the command-line to execute on the node\n :returns: a tuple of the command's (output, return_code)\n \"\"\"\n ssh = connect_ssh(node['ssh_username'], node['fqdn'],\n node['ssh_port'], node['ssh_private_key'])\n output, rc = exec_ssh(ssh, command, pty=True)\n return output, rc\n\n\ndef converge_formation(formation):\n \"\"\"\n Converge all nodes in a formation.\n\n \"Converge\" means to change a node's configuration to match that defined by\n configuration management.\n\n :param formation: a :class:`~api.models.Formation` to converge\n :returns: the combined output of the nodes' convergence commands\n \"\"\"\n nodes = formation.node_set.all()\n subtasks = []\n for n in nodes:\n subtask = converge_node.s(n.id,\n n.layer.flavor.ssh_username,\n n.fqdn,\n n.layer.flavor.ssh_private_key)\n subtasks.append(subtask)\n job = group(*subtasks)\n return job.apply_async().join()\n\n\ndef publish_user(user, data):\n \"\"\"\n Publish a user to configuration management.\n\n :param user: a dict containing the username\n :param data: data to store with the user\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-users', user['username'], data)\n\n\ndef publish_app(app, data):\n \"\"\"\n Publish an app to configuration management.\n\n :param app: a dict containing the id of the app\n :param data: data to store with the app\n :returns: a tuple of (body, status) from the 
underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-apps', app['id'], data)\n\n\ndef purge_app(app):\n \"\"\"\n Purge an app from configuration management.\n\n :param app: a dict containing the id of the app\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-apps', app['id'])\n\n\ndef publish_formation(formation, data):\n \"\"\"\n Publish a formation to configuration management.\n\n :param formation: a dict containing the id of the formation\n :param data: data to store with the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _publish('deis-formations', formation['id'], data)\n\n\ndef purge_formation(formation):\n \"\"\"\n Purge a formation from configuration management.\n\n :param formation: a dict containing the id of the formation\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n _purge('deis-formations', formation['id'])\n\n\ndef _publish(data_bag, item_name, item_value):\n \"\"\"\n Publish a data bag item to the Chef server.\n\n :param data_bag: the name of a Chef data bag\n :param item_name: the name of the item to publish\n :param item_value: the value of the item to publish\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.update_databag_item(data_bag, item_name, item_value)\n if status != 200:\n body, status = client.create_databag_item(data_bag, item_name, item_value)\n if status != 201:\n raise RuntimeError('Could not publish {item_name}: {body}'.format(**locals()))\n return body, status\n\n\ndef _purge(databag_name, item_name):\n \"\"\"\n Purge a data bag item from the Chef server.\n\n :param databag_name: the name of a Chef data bag\n :param item_name: the name of the item to purge\n :returns: a tuple of (body, status) from the underlying HTTP response\n :raises: RuntimeError\n \"\"\"\n client = _get_client()\n body, status = client.delete_databag_item(databag_name, item_name)\n if status == 200 or status == 404:\n return body, status\n raise RuntimeError('Could not purge {item_name}: {body}'.format(**locals()))\n", "path": "cm/chef.py"}]} | 3,729 | 110 |
gh_patches_debug_22524 | rasdani/github-patches | git_diff | napari__napari-1402 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Colormap in 3D broken by Image Layer Event Handler
## 🐛 Bug
Changing the colormap in 3D doesn't cause the colormap for the actual data to update. The thumbnail does update. This was likely introduced in #1376. Changing colormap in 2D still works fine.
--- END ISSUE ---
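[Editor's note] A quick way to see the reported behaviour is sketched below. The snippet is illustrative only: it assumes napari's public API of that era (`napari.Viewer`, `add_image`, `viewer.dims.ndisplay`, `layer.colormap`, and the `napari.gui_qt()` event-loop helper), and the random volume is a placeholder.

```python
# Hypothetical reproduction sketch -- not part of the original bug report.
import numpy as np
import napari

volume = np.random.random((64, 64, 64))  # placeholder data

with napari.gui_qt():  # event-loop helper in napari releases of that era
    viewer = napari.Viewer()
    layer = viewer.add_image(volume, colormap='gray')

    viewer.dims.ndisplay = 3   # switch the canvas to 3D (volume) rendering
    layer.colormap = 'magma'   # thumbnail updates, but the rendered volume is
                               # expected to keep the old colormap (the bug above)
```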
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_vispy/vispy_image_layer.py`
Content:
```
1 import warnings
2 from vispy.scene.visuals import Image as ImageNode
3 from .volume import Volume as VolumeNode
4 from vispy.color import Colormap
5 import numpy as np
6 from .vispy_base_layer import VispyBaseLayer
7 from ..layers.image._image_constants import Rendering
8 from ..utils.colormaps import ensure_colormap_tuple
9
10
11 texture_dtypes = [
12 np.dtype(np.int8),
13 np.dtype(np.uint8),
14 np.dtype(np.int16),
15 np.dtype(np.uint16),
16 np.dtype(np.float32),
17 ]
18
19
20 class VispyImageLayer(VispyBaseLayer):
21 def __init__(self, layer):
22 node = ImageNode(None, method='auto')
23 super().__init__(layer, node)
24
25 # Once #1842 and #1844 from vispy are released and gamma adjustment is
26 # done on the GPU these can be dropped
27 self._raw_cmap = None
28 self._gamma = 1
29
30 # Until we add a specific attenuation parameter to vispy we have to
31 # track both iso_threshold and attenuation ourselves.
32 self._iso_threshold = 1
33 self._attenuation = 1
34
35 self._on_display_change()
36 self._on_slice_data_change()
37
38 def _on_display_change(self, data=None):
39 parent = self.node.parent
40 self.node.parent = None
41
42 if self.layer.dims.ndisplay == 2:
43 self.node = ImageNode(data, method='auto')
44 else:
45 if data is None:
46 data = np.zeros((1, 1, 1))
47 self.node = VolumeNode(data, clim=self.layer.contrast_limits)
48
49 self.node.parent = parent
50 self.reset()
51
52 def _on_slice_data_change(self, event=None):
53 # Slice data event will be fixed to use passed value after EVH refactor
54 # is finished for all layers
55 data = self.layer._data_view
56 dtype = np.dtype(data.dtype)
57 if dtype not in texture_dtypes:
58 try:
59 dtype = dict(
60 i=np.int16, f=np.float32, u=np.uint16, b=np.uint8
61 )[dtype.kind]
62 except KeyError: # not an int or float
63 raise TypeError(
64 f'type {dtype} not allowed for texture; must be one of {set(texture_dtypes)}' # noqa: E501
65 )
66 data = data.astype(dtype)
67
68 if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
69 data = np.expand_dims(data, axis=0)
70
71 # Check if data exceeds MAX_TEXTURE_SIZE and downsample
72 if (
73 self.MAX_TEXTURE_SIZE_2D is not None
74 and self.layer.dims.ndisplay == 2
75 ):
76 data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_2D)
77 elif (
78 self.MAX_TEXTURE_SIZE_3D is not None
79 and self.layer.dims.ndisplay == 3
80 ):
81 data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_3D)
82
83 # Check if ndisplay has changed current node type needs updating
84 if (
85 self.layer.dims.ndisplay == 3
86 and not isinstance(self.node, VolumeNode)
87 ) or (
88 self.layer.dims.ndisplay == 2
89 and not isinstance(self.node, ImageNode)
90 ):
91 self._on_display_change(data)
92 else:
93 if self.layer.dims.ndisplay == 2:
94 self.node._need_colortransform_update = True
95 self.node.set_data(data)
96 else:
97 self.node.set_data(data, clim=self.layer.contrast_limits)
98
99 # Call to update order of translation values with new dims:
100 self._on_scale_change()
101 self._on_translate_change()
102 self.node.update()
103
104 def _on_interpolation_change(self, interpolation):
105         """Receive layer model interpolation change event and update the visual.
106
107 Parameters
108 ----------
109         interpolation : str
110             Interpolation mode name forwarded to the VisPy node.
111 """
112 self.node.interpolation = interpolation
113
114 def _on_rendering_change(self, rendering):
115 """Receive layer model rendering change event and update dropdown menu.
116
117 Parameters
118 ----------
119         rendering : str
120 Rendering mode used by VisPy.
121 Selects a preset rendering mode in VisPy that determines how
122 volume is displayed:
123 * translucent: voxel colors are blended along the view ray until
124 the result is opaque.
125         * mip: maximum intensity projection. Cast a ray and display the
126 maximum value that was encountered.
127 * additive: voxel colors are added along the view ray until
128 the result is saturated.
129 * iso: isosurface. Cast a ray until a certain threshold is
130             encountered. At that location, lighting calculations are
131 performed to give the visual appearance of a surface.
132         * attenuated_mip: attenuated maximum intensity projection. Cast a
133 ray and attenuate values based on integral of encountered values,
134 display the maximum value that was encountered after attenuation.
135 This will make nearer objects appear more prominent.
136 """
137 if isinstance(self.node, VolumeNode):
138 self.node.method = rendering
139 if Rendering(rendering) == Rendering.ISO:
140 self.node.threshold = float(self._iso_threshold)
141 elif Rendering(rendering) == Rendering.ATTENUATED_MIP:
142 self.node.threshold = float(self._attenuation)
143
144 def _on_colormap_change(self, colormap):
145 """Receive layer model colormap change event and update the visual.
146
147 Parameters
148 ----------
149 colormap : str or tuple
150 Colormap name or tuple of (name, vispy.color.Colormap).
151 """
152 name, cmap = ensure_colormap_tuple(colormap)
153 # Once #1842 and #1844 from vispy are released and gamma adjustment is
154 # done on the GPU this can be dropped
155 self._raw_cmap = cmap
156 if self._gamma != 1:
157 # when gamma!=1, we instantiate a new colormap with 256 control
158 # points from 0-1
159 node_cmap = Colormap(cmap[np.linspace(0, 1, 256) ** self._gamma])
160 else:
161 node_cmap = cmap
162 self.node.cmap = node_cmap
163
164 def _on_contrast_limits_change(self, contrast_limits):
165 """Receive layer model contrast limits change event and update visual.
166
167 Parameters
168 ----------
169 contrast_limits : tuple
170 Contrast limits.
171 """
172 # Once #1842 from vispy is released this if else can be dropped
173 if isinstance(self.node, VolumeNode):
174 self._on_slice_data_change()
175 else:
176 self.node.clim = contrast_limits
177
178 def _on_gamma_change(self, gamma):
179 """Receive the layer model gamma change event and update the visual.
180
181 Parameters
182 ----------
183 gamma : float
184 Gamma value.
185 """
186 # Once #1842 and #1844 from vispy are released and gamma adjustment is
187 # done on the GPU this can be dropped
188 if gamma != 1:
189 # when gamma!=1, we instantiate a new colormap with 256 control
190 # points from 0-1
191 cmap = Colormap(self._raw_cmap[np.linspace(0, 1, 256) ** gamma])
192 else:
193 cmap = self._raw_cmap
194 self._gamma = gamma
195 self.node.cmap = cmap
196
197 def _on_iso_threshold_change(self, iso_threshold):
198 """Receive layer model isosurface change event and update the visual.
199
200 Parameters
201 ----------
202 iso_threshold : float
203 Iso surface threshold value, between 0 and 1.
204 """
205 if (
206 isinstance(self.node, VolumeNode)
207 and Rendering(self.node.method) == Rendering.ISO
208 ):
209 self._iso_threshold = iso_threshold
210 self.node.threshold = float(iso_threshold)
211
212 def _on_attenuation_change(self, attenuation):
213 """Receive layer model attenuation change event and update the visual.
214
215 Parameters
216 ----------
217 attenuation : float
218 Attenuation value, between 0 and 2.
219 """
220 if (
221 isinstance(self.node, VolumeNode)
222 and Rendering(self.node.method) == Rendering.ATTENUATED_MIP
223 ):
224 self._attenuation = attenuation
225 self.node.threshold = float(attenuation)
226
227 def reset(self, event=None):
228 self._reset_base()
229 self._on_colormap_change(self.layer.colormap)
230 self._on_rendering_change(self.layer.rendering)
231 if isinstance(self.node, ImageNode):
232 self._on_contrast_limits_change(self.layer.contrast_limits)
233
234 def downsample_texture(self, data, MAX_TEXTURE_SIZE):
235 """Downsample data based on maximum allowed texture size.
236
237 Parameters
238 ----------
239 data : array
240 Data to be downsampled if needed.
241 MAX_TEXTURE_SIZE : int
242 Maximum allowed texture size.
243
244 Returns
245 -------
246 data : array
247 Data that now fits inside texture.
248 """
249 if np.any(np.greater(data.shape, MAX_TEXTURE_SIZE)):
250 if self.layer.multiscale:
251 raise ValueError(
252 f"Shape of individual tiles in multiscale {data.shape} "
253 f"cannot exceed GL_MAX_TEXTURE_SIZE "
254 f"{MAX_TEXTURE_SIZE}. Rendering is currently in "
255 f"{self.layer.dims.ndisplay}D mode."
256 )
257 warnings.warn(
258 f"data shape {data.shape} exceeds GL_MAX_TEXTURE_SIZE "
259 f"{MAX_TEXTURE_SIZE} in at least one axis and "
260 f"will be downsampled. Rendering is currently in "
261 f"{self.layer.dims.ndisplay}D mode."
262 )
263 downsample = np.ceil(
264 np.divide(data.shape, MAX_TEXTURE_SIZE)
265 ).astype(int)
266 scale = np.ones(self.layer.ndim)
267 for i, d in enumerate(self.layer.dims.displayed):
268 scale[d] = downsample[i]
269 self.layer._transforms['tile2data'].scale = scale
270 self._on_scale_change()
271 slices = tuple(slice(None, None, ds) for ds in downsample)
272 data = data[slices]
273 return data
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_vispy/vispy_image_layer.py b/napari/_vispy/vispy_image_layer.py
--- a/napari/_vispy/vispy_image_layer.py
+++ b/napari/_vispy/vispy_image_layer.py
@@ -159,6 +159,13 @@
node_cmap = Colormap(cmap[np.linspace(0, 1, 256) ** self._gamma])
else:
node_cmap = cmap
+ # Following should be added to cmap setter in VisPy volume visual
+ if isinstance(self.node, VolumeNode):
+ self.node.view_program['texture2D_LUT'] = (
+ node_cmap.texture_lut()
+ if (hasattr(node_cmap, 'texture_lut'))
+ else None
+ )
self.node.cmap = node_cmap
def _on_contrast_limits_change(self, contrast_limits):
@@ -192,6 +199,11 @@
else:
cmap = self._raw_cmap
self._gamma = gamma
+ # Following should be added to cmap setter in VisPy volume visual
+ if isinstance(self.node, VolumeNode):
+ self.node.view_program['texture2D_LUT'] = (
+ cmap.texture_lut() if (hasattr(cmap, 'texture_lut')) else None
+ )
self.node.cmap = cmap
def _on_iso_threshold_change(self, iso_threshold):
| {"golden_diff": "diff --git a/napari/_vispy/vispy_image_layer.py b/napari/_vispy/vispy_image_layer.py\n--- a/napari/_vispy/vispy_image_layer.py\n+++ b/napari/_vispy/vispy_image_layer.py\n@@ -159,6 +159,13 @@\n node_cmap = Colormap(cmap[np.linspace(0, 1, 256) ** self._gamma])\n else:\n node_cmap = cmap\n+ # Following should be added to cmap setter in VisPy volume visual\n+ if isinstance(self.node, VolumeNode):\n+ self.node.view_program['texture2D_LUT'] = (\n+ node_cmap.texture_lut()\n+ if (hasattr(node_cmap, 'texture_lut'))\n+ else None\n+ )\n self.node.cmap = node_cmap\n \n def _on_contrast_limits_change(self, contrast_limits):\n@@ -192,6 +199,11 @@\n else:\n cmap = self._raw_cmap\n self._gamma = gamma\n+ # Following should be added to cmap setter in VisPy volume visual\n+ if isinstance(self.node, VolumeNode):\n+ self.node.view_program['texture2D_LUT'] = (\n+ cmap.texture_lut() if (hasattr(cmap, 'texture_lut')) else None\n+ )\n self.node.cmap = cmap\n \n def _on_iso_threshold_change(self, iso_threshold):\n", "issue": "Colormap in 3D broken by Image Layer Event Handler\n## \ud83d\udc1b Bug\r\n\r\nChanging the colormap in 3D doesn't cause the colormap for the actual data to update. The thumbnail does update. This was likely introduced in #1376. Changing colormap in 2D still works fine.\n", "before_files": [{"content": "import warnings\nfrom vispy.scene.visuals import Image as ImageNode\nfrom .volume import Volume as VolumeNode\nfrom vispy.color import Colormap\nimport numpy as np\nfrom .vispy_base_layer import VispyBaseLayer\nfrom ..layers.image._image_constants import Rendering\nfrom ..utils.colormaps import ensure_colormap_tuple\n\n\ntexture_dtypes = [\n np.dtype(np.int8),\n np.dtype(np.uint8),\n np.dtype(np.int16),\n np.dtype(np.uint16),\n np.dtype(np.float32),\n]\n\n\nclass VispyImageLayer(VispyBaseLayer):\n def __init__(self, layer):\n node = ImageNode(None, method='auto')\n super().__init__(layer, node)\n\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU these can be dropped\n self._raw_cmap = None\n self._gamma = 1\n\n # Until we add a specific attenuation parameter to vispy we have to\n # track both iso_threshold and attenuation ourselves.\n self._iso_threshold = 1\n self._attenuation = 1\n\n self._on_display_change()\n self._on_slice_data_change()\n\n def _on_display_change(self, data=None):\n parent = self.node.parent\n self.node.parent = None\n\n if self.layer.dims.ndisplay == 2:\n self.node = ImageNode(data, method='auto')\n else:\n if data is None:\n data = np.zeros((1, 1, 1))\n self.node = VolumeNode(data, clim=self.layer.contrast_limits)\n\n self.node.parent = parent\n self.reset()\n\n def _on_slice_data_change(self, event=None):\n # Slice data event will be fixed to use passed value after EVH refactor\n # is finished for all layers\n data = self.layer._data_view\n dtype = np.dtype(data.dtype)\n if dtype not in texture_dtypes:\n try:\n dtype = dict(\n i=np.int16, f=np.float32, u=np.uint16, b=np.uint8\n )[dtype.kind]\n except KeyError: # not an int or float\n raise TypeError(\n f'type {dtype} not allowed for texture; must be one of {set(texture_dtypes)}' # noqa: E501\n )\n data = data.astype(dtype)\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n data = np.expand_dims(data, axis=0)\n\n # Check if data exceeds MAX_TEXTURE_SIZE and downsample\n if (\n self.MAX_TEXTURE_SIZE_2D is not None\n and self.layer.dims.ndisplay == 2\n ):\n data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_2D)\n elif (\n 
self.MAX_TEXTURE_SIZE_3D is not None\n and self.layer.dims.ndisplay == 3\n ):\n data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_3D)\n\n # Check if ndisplay has changed current node type needs updating\n if (\n self.layer.dims.ndisplay == 3\n and not isinstance(self.node, VolumeNode)\n ) or (\n self.layer.dims.ndisplay == 2\n and not isinstance(self.node, ImageNode)\n ):\n self._on_display_change(data)\n else:\n if self.layer.dims.ndisplay == 2:\n self.node._need_colortransform_update = True\n self.node.set_data(data)\n else:\n self.node.set_data(data, clim=self.layer.contrast_limits)\n\n # Call to update order of translation values with new dims:\n self._on_scale_change()\n self._on_translate_change()\n self.node.update()\n\n def _on_interpolation_change(self, interpolation):\n \"\"\"Receive layer model isosurface change event and update the visual.\n\n Parameters\n ----------\n interpolation : float\n Iso surface threshold value, between 0 and 1.\n \"\"\"\n self.node.interpolation = interpolation\n\n def _on_rendering_change(self, rendering):\n \"\"\"Receive layer model rendering change event and update dropdown menu.\n\n Parameters\n ----------\n text : str\n Rendering mode used by VisPy.\n Selects a preset rendering mode in VisPy that determines how\n volume is displayed:\n * translucent: voxel colors are blended along the view ray until\n the result is opaque.\n * mip: maxiumum intensity projection. Cast a ray and display the\n maximum value that was encountered.\n * additive: voxel colors are added along the view ray until\n the result is saturated.\n * iso: isosurface. Cast a ray until a certain threshold is\n encountered. At that location, lighning calculations are\n performed to give the visual appearance of a surface.\n * attenuated_mip: attenuated maxiumum intensity projection. 
Cast a\n ray and attenuate values based on integral of encountered values,\n display the maximum value that was encountered after attenuation.\n This will make nearer objects appear more prominent.\n \"\"\"\n if isinstance(self.node, VolumeNode):\n self.node.method = rendering\n if Rendering(rendering) == Rendering.ISO:\n self.node.threshold = float(self._iso_threshold)\n elif Rendering(rendering) == Rendering.ATTENUATED_MIP:\n self.node.threshold = float(self._attenuation)\n\n def _on_colormap_change(self, colormap):\n \"\"\"Receive layer model colormap change event and update the visual.\n\n Parameters\n ----------\n colormap : str or tuple\n Colormap name or tuple of (name, vispy.color.Colormap).\n \"\"\"\n name, cmap = ensure_colormap_tuple(colormap)\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU this can be dropped\n self._raw_cmap = cmap\n if self._gamma != 1:\n # when gamma!=1, we instantiate a new colormap with 256 control\n # points from 0-1\n node_cmap = Colormap(cmap[np.linspace(0, 1, 256) ** self._gamma])\n else:\n node_cmap = cmap\n self.node.cmap = node_cmap\n\n def _on_contrast_limits_change(self, contrast_limits):\n \"\"\"Receive layer model contrast limits change event and update visual.\n\n Parameters\n ----------\n contrast_limits : tuple\n Contrast limits.\n \"\"\"\n # Once #1842 from vispy is released this if else can be dropped\n if isinstance(self.node, VolumeNode):\n self._on_slice_data_change()\n else:\n self.node.clim = contrast_limits\n\n def _on_gamma_change(self, gamma):\n \"\"\"Receive the layer model gamma change event and update the visual.\n\n Parameters\n ----------\n gamma : float\n Gamma value.\n \"\"\"\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU this can be dropped\n if gamma != 1:\n # when gamma!=1, we instantiate a new colormap with 256 control\n # points from 0-1\n cmap = Colormap(self._raw_cmap[np.linspace(0, 1, 256) ** gamma])\n else:\n cmap = self._raw_cmap\n self._gamma = gamma\n self.node.cmap = cmap\n\n def _on_iso_threshold_change(self, iso_threshold):\n \"\"\"Receive layer model isosurface change event and update the visual.\n\n Parameters\n ----------\n iso_threshold : float\n Iso surface threshold value, between 0 and 1.\n \"\"\"\n if (\n isinstance(self.node, VolumeNode)\n and Rendering(self.node.method) == Rendering.ISO\n ):\n self._iso_threshold = iso_threshold\n self.node.threshold = float(iso_threshold)\n\n def _on_attenuation_change(self, attenuation):\n \"\"\"Receive layer model attenuation change event and update the visual.\n\n Parameters\n ----------\n attenuation : float\n Attenuation value, between 0 and 2.\n \"\"\"\n if (\n isinstance(self.node, VolumeNode)\n and Rendering(self.node.method) == Rendering.ATTENUATED_MIP\n ):\n self._attenuation = attenuation\n self.node.threshold = float(attenuation)\n\n def reset(self, event=None):\n self._reset_base()\n self._on_colormap_change(self.layer.colormap)\n self._on_rendering_change(self.layer.rendering)\n if isinstance(self.node, ImageNode):\n self._on_contrast_limits_change(self.layer.contrast_limits)\n\n def downsample_texture(self, data, MAX_TEXTURE_SIZE):\n \"\"\"Downsample data based on maximum allowed texture size.\n\n Parameters\n ----------\n data : array\n Data to be downsampled if needed.\n MAX_TEXTURE_SIZE : int\n Maximum allowed texture size.\n\n Returns\n -------\n data : array\n Data that now fits inside texture.\n \"\"\"\n if np.any(np.greater(data.shape, 
MAX_TEXTURE_SIZE)):\n if self.layer.multiscale:\n raise ValueError(\n f\"Shape of individual tiles in multiscale {data.shape} \"\n f\"cannot exceed GL_MAX_TEXTURE_SIZE \"\n f\"{MAX_TEXTURE_SIZE}. Rendering is currently in \"\n f\"{self.layer.dims.ndisplay}D mode.\"\n )\n warnings.warn(\n f\"data shape {data.shape} exceeds GL_MAX_TEXTURE_SIZE \"\n f\"{MAX_TEXTURE_SIZE} in at least one axis and \"\n f\"will be downsampled. Rendering is currently in \"\n f\"{self.layer.dims.ndisplay}D mode.\"\n )\n downsample = np.ceil(\n np.divide(data.shape, MAX_TEXTURE_SIZE)\n ).astype(int)\n scale = np.ones(self.layer.ndim)\n for i, d in enumerate(self.layer.dims.displayed):\n scale[d] = downsample[i]\n self.layer._transforms['tile2data'].scale = scale\n self._on_scale_change()\n slices = tuple(slice(None, None, ds) for ds in downsample)\n data = data[slices]\n return data\n", "path": "napari/_vispy/vispy_image_layer.py"}], "after_files": [{"content": "import warnings\nfrom vispy.scene.visuals import Image as ImageNode\nfrom .volume import Volume as VolumeNode\nfrom vispy.color import Colormap\nimport numpy as np\nfrom .vispy_base_layer import VispyBaseLayer\nfrom ..layers.image._image_constants import Rendering\nfrom ..utils.colormaps import ensure_colormap_tuple\n\n\ntexture_dtypes = [\n np.dtype(np.int8),\n np.dtype(np.uint8),\n np.dtype(np.int16),\n np.dtype(np.uint16),\n np.dtype(np.float32),\n]\n\n\nclass VispyImageLayer(VispyBaseLayer):\n def __init__(self, layer):\n node = ImageNode(None, method='auto')\n super().__init__(layer, node)\n\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU these can be dropped\n self._raw_cmap = None\n self._gamma = 1\n\n # Until we add a specific attenuation parameter to vispy we have to\n # track both iso_threshold and attenuation ourselves.\n self._iso_threshold = 1\n self._attenuation = 1\n\n self._on_display_change()\n self._on_slice_data_change()\n\n def _on_display_change(self, data=None):\n parent = self.node.parent\n self.node.parent = None\n\n if self.layer.dims.ndisplay == 2:\n self.node = ImageNode(data, method='auto')\n else:\n if data is None:\n data = np.zeros((1, 1, 1))\n self.node = VolumeNode(data, clim=self.layer.contrast_limits)\n\n self.node.parent = parent\n self.reset()\n\n def _on_slice_data_change(self, event=None):\n # Slice data event will be fixed to use passed value after EVH refactor\n # is finished for all layers\n data = self.layer._data_view\n dtype = np.dtype(data.dtype)\n if dtype not in texture_dtypes:\n try:\n dtype = dict(\n i=np.int16, f=np.float32, u=np.uint16, b=np.uint8\n )[dtype.kind]\n except KeyError: # not an int or float\n raise TypeError(\n f'type {dtype} not allowed for texture; must be one of {set(texture_dtypes)}' # noqa: E501\n )\n data = data.astype(dtype)\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n data = np.expand_dims(data, axis=0)\n\n # Check if data exceeds MAX_TEXTURE_SIZE and downsample\n if (\n self.MAX_TEXTURE_SIZE_2D is not None\n and self.layer.dims.ndisplay == 2\n ):\n data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_2D)\n elif (\n self.MAX_TEXTURE_SIZE_3D is not None\n and self.layer.dims.ndisplay == 3\n ):\n data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_3D)\n\n # Check if ndisplay has changed current node type needs updating\n if (\n self.layer.dims.ndisplay == 3\n and not isinstance(self.node, VolumeNode)\n ) or (\n self.layer.dims.ndisplay == 2\n and not isinstance(self.node, ImageNode)\n ):\n 
self._on_display_change(data)\n else:\n if self.layer.dims.ndisplay == 2:\n self.node._need_colortransform_update = True\n self.node.set_data(data)\n else:\n self.node.set_data(data, clim=self.layer.contrast_limits)\n\n # Call to update order of translation values with new dims:\n self._on_scale_change()\n self._on_translate_change()\n self.node.update()\n\n def _on_interpolation_change(self, interpolation):\n \"\"\"Receive layer model isosurface change event and update the visual.\n\n Parameters\n ----------\n interpolation : float\n Iso surface threshold value, between 0 and 1.\n \"\"\"\n self.node.interpolation = interpolation\n\n def _on_rendering_change(self, rendering):\n \"\"\"Receive layer model rendering change event and update dropdown menu.\n\n Parameters\n ----------\n text : str\n Rendering mode used by VisPy.\n Selects a preset rendering mode in VisPy that determines how\n volume is displayed:\n * translucent: voxel colors are blended along the view ray until\n the result is opaque.\n * mip: maxiumum intensity projection. Cast a ray and display the\n maximum value that was encountered.\n * additive: voxel colors are added along the view ray until\n the result is saturated.\n * iso: isosurface. Cast a ray until a certain threshold is\n encountered. At that location, lighning calculations are\n performed to give the visual appearance of a surface.\n * attenuated_mip: attenuated maxiumum intensity projection. Cast a\n ray and attenuate values based on integral of encountered values,\n display the maximum value that was encountered after attenuation.\n This will make nearer objects appear more prominent.\n \"\"\"\n if isinstance(self.node, VolumeNode):\n self.node.method = rendering\n if Rendering(rendering) == Rendering.ISO:\n self.node.threshold = float(self._iso_threshold)\n elif Rendering(rendering) == Rendering.ATTENUATED_MIP:\n self.node.threshold = float(self._attenuation)\n\n def _on_colormap_change(self, colormap):\n \"\"\"Receive layer model colormap change event and update the visual.\n\n Parameters\n ----------\n colormap : str or tuple\n Colormap name or tuple of (name, vispy.color.Colormap).\n \"\"\"\n name, cmap = ensure_colormap_tuple(colormap)\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU this can be dropped\n self._raw_cmap = cmap\n if self._gamma != 1:\n # when gamma!=1, we instantiate a new colormap with 256 control\n # points from 0-1\n node_cmap = Colormap(cmap[np.linspace(0, 1, 256) ** self._gamma])\n else:\n node_cmap = cmap\n # Following should be added to cmap setter in VisPy volume visual\n if isinstance(self.node, VolumeNode):\n self.node.view_program['texture2D_LUT'] = (\n node_cmap.texture_lut()\n if (hasattr(node_cmap, 'texture_lut'))\n else None\n )\n self.node.cmap = node_cmap\n\n def _on_contrast_limits_change(self, contrast_limits):\n \"\"\"Receive layer model contrast limits change event and update visual.\n\n Parameters\n ----------\n contrast_limits : tuple\n Contrast limits.\n \"\"\"\n # Once #1842 from vispy is released this if else can be dropped\n if isinstance(self.node, VolumeNode):\n self._on_slice_data_change()\n else:\n self.node.clim = contrast_limits\n\n def _on_gamma_change(self, gamma):\n \"\"\"Receive the layer model gamma change event and update the visual.\n\n Parameters\n ----------\n gamma : float\n Gamma value.\n \"\"\"\n # Once #1842 and #1844 from vispy are released and gamma adjustment is\n # done on the GPU this can be dropped\n if gamma != 1:\n # when gamma!=1, we 
instantiate a new colormap with 256 control\n # points from 0-1\n cmap = Colormap(self._raw_cmap[np.linspace(0, 1, 256) ** gamma])\n else:\n cmap = self._raw_cmap\n self._gamma = gamma\n # Following should be added to cmap setter in VisPy volume visual\n if isinstance(self.node, VolumeNode):\n self.node.view_program['texture2D_LUT'] = (\n cmap.texture_lut() if (hasattr(cmap, 'texture_lut')) else None\n )\n self.node.cmap = cmap\n\n def _on_iso_threshold_change(self, iso_threshold):\n \"\"\"Receive layer model isosurface change event and update the visual.\n\n Parameters\n ----------\n iso_threshold : float\n Iso surface threshold value, between 0 and 1.\n \"\"\"\n if (\n isinstance(self.node, VolumeNode)\n and Rendering(self.node.method) == Rendering.ISO\n ):\n self._iso_threshold = iso_threshold\n self.node.threshold = float(iso_threshold)\n\n def _on_attenuation_change(self, attenuation):\n \"\"\"Receive layer model attenuation change event and update the visual.\n\n Parameters\n ----------\n attenuation : float\n Attenuation value, between 0 and 2.\n \"\"\"\n if (\n isinstance(self.node, VolumeNode)\n and Rendering(self.node.method) == Rendering.ATTENUATED_MIP\n ):\n self._attenuation = attenuation\n self.node.threshold = float(attenuation)\n\n def reset(self, event=None):\n self._reset_base()\n self._on_colormap_change(self.layer.colormap)\n self._on_rendering_change(self.layer.rendering)\n if isinstance(self.node, ImageNode):\n self._on_contrast_limits_change(self.layer.contrast_limits)\n\n def downsample_texture(self, data, MAX_TEXTURE_SIZE):\n \"\"\"Downsample data based on maximum allowed texture size.\n\n Parameters\n ----------\n data : array\n Data to be downsampled if needed.\n MAX_TEXTURE_SIZE : int\n Maximum allowed texture size.\n\n Returns\n -------\n data : array\n Data that now fits inside texture.\n \"\"\"\n if np.any(np.greater(data.shape, MAX_TEXTURE_SIZE)):\n if self.layer.multiscale:\n raise ValueError(\n f\"Shape of individual tiles in multiscale {data.shape} \"\n f\"cannot exceed GL_MAX_TEXTURE_SIZE \"\n f\"{MAX_TEXTURE_SIZE}. Rendering is currently in \"\n f\"{self.layer.dims.ndisplay}D mode.\"\n )\n warnings.warn(\n f\"data shape {data.shape} exceeds GL_MAX_TEXTURE_SIZE \"\n f\"{MAX_TEXTURE_SIZE} in at least one axis and \"\n f\"will be downsampled. Rendering is currently in \"\n f\"{self.layer.dims.ndisplay}D mode.\"\n )\n downsample = np.ceil(\n np.divide(data.shape, MAX_TEXTURE_SIZE)\n ).astype(int)\n scale = np.ones(self.layer.ndim)\n for i, d in enumerate(self.layer.dims.displayed):\n scale[d] = downsample[i]\n self.layer._transforms['tile2data'].scale = scale\n self._on_scale_change()\n slices = tuple(slice(None, None, ds) for ds in downsample)\n data = data[slices]\n return data\n", "path": "napari/_vispy/vispy_image_layer.py"}]} | 3,265 | 328 |
gh_patches_debug_31286 | rasdani/github-patches | git_diff | ocf__ocfweb-57 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Home page should have links to reset password, check print quota, and view the print queue
--- END ISSUE ---
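[Editor's note] The accepted patch shown later in this record does not add the links themselves; it wires an `is_ocf_ip` flag into the template context (using `django-ipware` plus Python's `ipaddress`) so the home page can decide how to present those links to lab visitors. A minimal, self-contained sketch of that check, with the subnet taken from the diff and the rest illustrative:

```python
# Sketch of the helper the patch introduces; illustrative, not the project file itself.
from ipaddress import ip_address, ip_network

OCF_LAB_SUBNET = ip_network('169.229.10.0/24')  # subnet used in the patch


def is_ocf_ip(ip: str) -> bool:
    """Return True when the client IP falls inside the OCF lab subnet."""
    return ip_address(ip) in OCF_LAB_SUBNET


assert is_ocf_ip('169.229.10.42')
assert not is_ocf_ip('8.8.8.8')
```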
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/context_processors.py`
Content:
```
1 import re
2 from datetime import date
3 from datetime import datetime
4
5 from ocflib.lab.hours import DayHours
6
7 from ocfweb.component.lab_status import get_lab_status
8
9
10 def ocf_template_processor(request):
11 now = datetime.now()
12 today = date.today()
13 hours = DayHours.from_date(today)
14
15 base_css_classes = []
16 if request.resolver_match.url_name:
17 page_class = 'page-' + request.resolver_match.url_name
18 base_css_classes.append(page_class)
19
20 for arg in request.resolver_match.args:
21 page_class += '-' + re.sub('[^a-zA-Z_\-]', '-', arg)
22 base_css_classes.append(page_class)
23
24 return {
25 'lab_is_open': hours.is_open(now),
26 'current_lab_hours': hours,
27 'lab_status': get_lab_status(),
28 'base_css_classes': ' '.join(base_css_classes),
29 }
30
```
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4 try:
5 with open('.version') as f:
6 VERSION = f.readline().strip()
7 except IOError:
8 VERSION = 'unknown'
9
10 setup(
11 name='ocfweb',
12 version=VERSION,
13 packages=find_packages(exclude=['debian', 'virtualenv_run']),
14 include_package_data=True,
15 url='https://www.ocf.berkeley.edu/',
16 author='Open Computing Facility',
17 author_email='[email protected]',
18 install_requires=[
19 'cachetools',
20 'django>=1.8,<1.8.999',
21 'gunicorn',
22 'libsass',
23 'lxml',
24 'mistune',
25 'ocflib',
26 'pygments',
27 'python-dateutil',
28 ],
29 sass_manifests={
30 'ocfweb': ('static/scss',),
31 },
32 )
33
```
Path: `ocfweb/main/home.py`
Content:
```
1 from datetime import date
2 from datetime import timedelta
3
4 from django.shortcuts import render_to_response
5 from django.template import RequestContext
6 from django.utils import timezone
7 from ocflib.lab.hours import get_hours
8 from ocflib.lab.staff_hours import get_staff_hours_soonest_first
9
10 from ocfweb.component.blog import get_blog_posts
11 from ocfweb.component.lab_status import get_lab_status
12
13
14 def home(request):
15 hours = [
16 get_hours(date.today() + timedelta(days=i)) for i in range(5)
17 ]
18
19 blog_posts = [
20 post for post
21 in get_blog_posts()
22 if timezone.now() - post.published < timedelta(days=365)
23 ][:2]
24
25 return render_to_response(
26 'home.html',
27 {
28 'fulltitle': 'Open Computing Facility at UC Berkeley',
29 'description': (
30 'The Open Computing Facility is an all-volunteer student '
31 'organization dedicated to free and open-source computing for all UC '
32 'Berkeley students.'''
33 ),
34 'staff_hours': get_staff_hours_soonest_first()[:2],
35 'hours': hours,
36 'today': hours[0],
37 'blog_posts': blog_posts,
38 'lab_status': get_lab_status(),
39 },
40 context_instance=RequestContext(request),
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ocfweb/context_processors.py b/ocfweb/context_processors.py
--- a/ocfweb/context_processors.py
+++ b/ocfweb/context_processors.py
@@ -1,12 +1,20 @@
import re
from datetime import date
from datetime import datetime
+from ipaddress import ip_address
+from ipaddress import ip_network
+from ipware.ip import get_real_ip
from ocflib.lab.hours import DayHours
from ocfweb.component.lab_status import get_lab_status
+def is_ocf_ip(ip):
+ # TODO: move this to ocflib when it drops Python 3.2 support
+ return ip_address(ip) in ip_network('169.229.10.0/24')
+
+
def ocf_template_processor(request):
now = datetime.now()
today = date.today()
@@ -21,9 +29,12 @@
page_class += '-' + re.sub('[^a-zA-Z_\-]', '-', arg)
base_css_classes.append(page_class)
+ real_ip = get_real_ip(request)
+
return {
'lab_is_open': hours.is_open(now),
'current_lab_hours': hours,
'lab_status': get_lab_status(),
'base_css_classes': ' '.join(base_css_classes),
+ 'is_ocf_ip': is_ocf_ip(real_ip) if real_ip else True,
}
diff --git a/ocfweb/main/home.py b/ocfweb/main/home.py
--- a/ocfweb/main/home.py
+++ b/ocfweb/main/home.py
@@ -13,7 +13,7 @@
def home(request):
hours = [
- get_hours(date.today() + timedelta(days=i)) for i in range(5)
+ get_hours(date.today() + timedelta(days=i)) for i in range(3)
]
blog_posts = [
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,7 @@
install_requires=[
'cachetools',
'django>=1.8,<1.8.999',
+ 'django-ipware',
'gunicorn',
'libsass',
'lxml',
| {"golden_diff": "diff --git a/ocfweb/context_processors.py b/ocfweb/context_processors.py\n--- a/ocfweb/context_processors.py\n+++ b/ocfweb/context_processors.py\n@@ -1,12 +1,20 @@\n import re\n from datetime import date\n from datetime import datetime\n+from ipaddress import ip_address\n+from ipaddress import ip_network\n \n+from ipware.ip import get_real_ip\n from ocflib.lab.hours import DayHours\n \n from ocfweb.component.lab_status import get_lab_status\n \n \n+def is_ocf_ip(ip):\n+ # TODO: move this to ocflib when it drops Python 3.2 support\n+ return ip_address(ip) in ip_network('169.229.10.0/24')\n+\n+\n def ocf_template_processor(request):\n now = datetime.now()\n today = date.today()\n@@ -21,9 +29,12 @@\n page_class += '-' + re.sub('[^a-zA-Z_\\-]', '-', arg)\n base_css_classes.append(page_class)\n \n+ real_ip = get_real_ip(request)\n+\n return {\n 'lab_is_open': hours.is_open(now),\n 'current_lab_hours': hours,\n 'lab_status': get_lab_status(),\n 'base_css_classes': ' '.join(base_css_classes),\n+ 'is_ocf_ip': is_ocf_ip(real_ip) if real_ip else True,\n }\ndiff --git a/ocfweb/main/home.py b/ocfweb/main/home.py\n--- a/ocfweb/main/home.py\n+++ b/ocfweb/main/home.py\n@@ -13,7 +13,7 @@\n \n def home(request):\n hours = [\n- get_hours(date.today() + timedelta(days=i)) for i in range(5)\n+ get_hours(date.today() + timedelta(days=i)) for i in range(3)\n ]\n \n blog_posts = [\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,7 @@\n install_requires=[\n 'cachetools',\n 'django>=1.8,<1.8.999',\n+ 'django-ipware',\n 'gunicorn',\n 'libsass',\n 'lxml',\n", "issue": "Home page should have a link to password reset, check print quota, view print queue\n\n", "before_files": [{"content": "import re\nfrom datetime import date\nfrom datetime import datetime\n\nfrom ocflib.lab.hours import DayHours\n\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef ocf_template_processor(request):\n now = datetime.now()\n today = date.today()\n hours = DayHours.from_date(today)\n\n base_css_classes = []\n if request.resolver_match.url_name:\n page_class = 'page-' + request.resolver_match.url_name\n base_css_classes.append(page_class)\n\n for arg in request.resolver_match.args:\n page_class += '-' + re.sub('[^a-zA-Z_\\-]', '-', arg)\n base_css_classes.append(page_class)\n\n return {\n 'lab_is_open': hours.is_open(now),\n 'current_lab_hours': hours,\n 'lab_status': get_lab_status(),\n 'base_css_classes': ' '.join(base_css_classes),\n }\n", "path": "ocfweb/context_processors.py"}, {"content": "from setuptools import find_packages\nfrom setuptools import setup\n\ntry:\n with open('.version') as f:\n VERSION = f.readline().strip()\nexcept IOError:\n VERSION = 'unknown'\n\nsetup(\n name='ocfweb',\n version=VERSION,\n packages=find_packages(exclude=['debian', 'virtualenv_run']),\n include_package_data=True,\n url='https://www.ocf.berkeley.edu/',\n author='Open Computing Facility',\n author_email='[email protected]',\n install_requires=[\n 'cachetools',\n 'django>=1.8,<1.8.999',\n 'gunicorn',\n 'libsass',\n 'lxml',\n 'mistune',\n 'ocflib',\n 'pygments',\n 'python-dateutil',\n ],\n sass_manifests={\n 'ocfweb': ('static/scss',),\n },\n)\n", "path": "setup.py"}, {"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom 
ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(5)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n", "path": "ocfweb/main/home.py"}], "after_files": [{"content": "import re\nfrom datetime import date\nfrom datetime import datetime\nfrom ipaddress import ip_address\nfrom ipaddress import ip_network\n\nfrom ipware.ip import get_real_ip\nfrom ocflib.lab.hours import DayHours\n\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef is_ocf_ip(ip):\n # TODO: move this to ocflib when it drops Python 3.2 support\n return ip_address(ip) in ip_network('169.229.10.0/24')\n\n\ndef ocf_template_processor(request):\n now = datetime.now()\n today = date.today()\n hours = DayHours.from_date(today)\n\n base_css_classes = []\n if request.resolver_match.url_name:\n page_class = 'page-' + request.resolver_match.url_name\n base_css_classes.append(page_class)\n\n for arg in request.resolver_match.args:\n page_class += '-' + re.sub('[^a-zA-Z_\\-]', '-', arg)\n base_css_classes.append(page_class)\n\n real_ip = get_real_ip(request)\n\n return {\n 'lab_is_open': hours.is_open(now),\n 'current_lab_hours': hours,\n 'lab_status': get_lab_status(),\n 'base_css_classes': ' '.join(base_css_classes),\n 'is_ocf_ip': is_ocf_ip(real_ip) if real_ip else True,\n }\n", "path": "ocfweb/context_processors.py"}, {"content": "from setuptools import find_packages\nfrom setuptools import setup\n\ntry:\n with open('.version') as f:\n VERSION = f.readline().strip()\nexcept IOError:\n VERSION = 'unknown'\n\nsetup(\n name='ocfweb',\n version=VERSION,\n packages=find_packages(exclude=['debian', 'virtualenv_run']),\n include_package_data=True,\n url='https://www.ocf.berkeley.edu/',\n author='Open Computing Facility',\n author_email='[email protected]',\n install_requires=[\n 'cachetools',\n 'django>=1.8,<1.8.999',\n 'django-ipware',\n 'gunicorn',\n 'libsass',\n 'lxml',\n 'mistune',\n 'ocflib',\n 'pygments',\n 'python-dateutil',\n ],\n sass_manifests={\n 'ocfweb': ('static/scss',),\n },\n)\n", "path": "setup.py"}, {"content": "from datetime import date\nfrom datetime import timedelta\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.utils import timezone\nfrom ocflib.lab.hours import get_hours\nfrom ocflib.lab.staff_hours import get_staff_hours_soonest_first\n\nfrom ocfweb.component.blog import get_blog_posts\nfrom ocfweb.component.lab_status import get_lab_status\n\n\ndef home(request):\n hours = [\n get_hours(date.today() + timedelta(days=i)) for i in range(3)\n ]\n\n blog_posts = [\n post for post\n in get_blog_posts()\n if timezone.now() - post.published < timedelta(days=365)\n ][:2]\n\n return render_to_response(\n 'home.html',\n {\n 'fulltitle': 'Open Computing Facility at UC Berkeley',\n 'description': (\n 'The Open Computing Facility is an 
all-volunteer student '\n 'organization dedicated to free and open-source computing for all UC '\n 'Berkeley students.'''\n ),\n 'staff_hours': get_staff_hours_soonest_first()[:2],\n 'hours': hours,\n 'today': hours[0],\n 'blog_posts': blog_posts,\n 'lab_status': get_lab_status(),\n },\n context_instance=RequestContext(request),\n )\n", "path": "ocfweb/main/home.py"}]} | 1,152 | 498 |
gh_patches_debug_16219 | rasdani/github-patches | git_diff | getsentry__sentry-5339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Localhost filter should consider affected URL
Right now the "localhost" filter only considers the affected User's IP: https://github.com/getsentry/sentry/blob/master/src/sentry/filters/localhost.py
But users also expect this to filter server errors triggered from a server running on localhost (e.g. local development).
See also: #4729, #4762
--- END ISSUE ---
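As a side note for readers of this record, the requested behaviour can be sketched independently of Sentry's internals: an event counts as local if either the reporting user's IP or the host of the affected URL points at localhost. The dictionary keys below mirror the interface names quoted in the issue, but the helper itself is only an illustration, not Sentry's actual filter API.
```python
from urllib.parse import urlparse

LOCAL_IPS = frozenset(['127.0.0.1', '::1'])
LOCAL_HOSTS = frozenset(['localhost', '127.0.0.1'])


def looks_local(event: dict) -> bool:
    """Illustrative check: local user IP *or* a localhost URL."""
    ip = event.get('sentry.interfaces.User', {}).get('ip_address', '')
    url = event.get('sentry.interfaces.Http', {}).get('url', '') or ''
    host = urlparse(url).hostname or ''
    return ip in LOCAL_IPS or host in LOCAL_HOSTS


# A server-side error reported from local development would now match:
assert looks_local({'sentry.interfaces.Http': {'url': 'http://localhost:8000/'}})
```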
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/filters/localhost.py`
Content:
```
1 from __future__ import absolute_import
2
3 from .base import Filter
4
5 LOCAL_IPS = frozenset(['127.0.0.1', '::1'])
6
7
8 class LocalhostFilter(Filter):
9 id = 'localhost'
10 name = 'Filter out errors coming from localhost'
11 description = 'This applies to to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.'
12
13 def get_ip_address(self, data):
14 try:
15 return data['sentry.interfaces.User']['ip_address']
16 except KeyError:
17 return ''
18
19 def test(self, data):
20 return self.get_ip_address(data) in LOCAL_IPS
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/filters/localhost.py b/src/sentry/filters/localhost.py
--- a/src/sentry/filters/localhost.py
+++ b/src/sentry/filters/localhost.py
@@ -1,8 +1,10 @@
from __future__ import absolute_import
from .base import Filter
+from six.moves.urllib.parse import urlparse
LOCAL_IPS = frozenset(['127.0.0.1', '::1'])
+LOCAL_DOMAINS = frozenset(['127.0.0.1', 'localhost'])
class LocalhostFilter(Filter):
@@ -16,5 +18,14 @@
except KeyError:
return ''
+ def get_url(self, data):
+ try:
+ return data['sentry.interfaces.Http']['url'] or ''
+ except KeyError:
+ return ''
+
+ def get_domain(self, data):
+ return urlparse(self.get_url(data)).netloc
+
def test(self, data):
- return self.get_ip_address(data) in LOCAL_IPS
+ return self.get_ip_address(data) in LOCAL_IPS or self.get_domain(data) in LOCAL_DOMAINS
| {"golden_diff": "diff --git a/src/sentry/filters/localhost.py b/src/sentry/filters/localhost.py\n--- a/src/sentry/filters/localhost.py\n+++ b/src/sentry/filters/localhost.py\n@@ -1,8 +1,10 @@\n from __future__ import absolute_import\n \n from .base import Filter\n+from six.moves.urllib.parse import urlparse\n \n LOCAL_IPS = frozenset(['127.0.0.1', '::1'])\n+LOCAL_DOMAINS = frozenset(['127.0.0.1', 'localhost'])\n \n \n class LocalhostFilter(Filter):\n@@ -16,5 +18,14 @@\n except KeyError:\n return ''\n \n+ def get_url(self, data):\n+ try:\n+ return data['sentry.interfaces.Http']['url'] or ''\n+ except KeyError:\n+ return ''\n+\n+ def get_domain(self, data):\n+ return urlparse(self.get_url(data)).netloc\n+\n def test(self, data):\n- return self.get_ip_address(data) in LOCAL_IPS\n+ return self.get_ip_address(data) in LOCAL_IPS or self.get_domain(data) in LOCAL_DOMAINS\n", "issue": "Localhost filter should consider affected URL\nRight now the \"localhost\" filter only consider's the affected User's IP: https://github.com/getsentry/sentry/blob/master/src/sentry/filters/localhost.py\r\n\r\nBut users are also expecting that this should filter server errors triggered from a server running on localhost (e.g. local development).\r\n\r\nSee also: #4729, #4762\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom .base import Filter\n\nLOCAL_IPS = frozenset(['127.0.0.1', '::1'])\n\n\nclass LocalhostFilter(Filter):\n id = 'localhost'\n name = 'Filter out errors coming from localhost'\n description = 'This applies to to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.'\n\n def get_ip_address(self, data):\n try:\n return data['sentry.interfaces.User']['ip_address']\n except KeyError:\n return ''\n\n def test(self, data):\n return self.get_ip_address(data) in LOCAL_IPS\n", "path": "src/sentry/filters/localhost.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom .base import Filter\nfrom six.moves.urllib.parse import urlparse\n\nLOCAL_IPS = frozenset(['127.0.0.1', '::1'])\nLOCAL_DOMAINS = frozenset(['127.0.0.1', 'localhost'])\n\n\nclass LocalhostFilter(Filter):\n id = 'localhost'\n name = 'Filter out errors coming from localhost'\n description = 'This applies to to both IPv4 (``127.0.0.1``) and IPv6 (``::1``) addresses.'\n\n def get_ip_address(self, data):\n try:\n return data['sentry.interfaces.User']['ip_address']\n except KeyError:\n return ''\n\n def get_url(self, data):\n try:\n return data['sentry.interfaces.Http']['url'] or ''\n except KeyError:\n return ''\n\n def get_domain(self, data):\n return urlparse(self.get_url(data)).netloc\n\n def test(self, data):\n return self.get_ip_address(data) in LOCAL_IPS or self.get_domain(data) in LOCAL_DOMAINS\n", "path": "src/sentry/filters/localhost.py"}]} | 530 | 254 |
gh_patches_debug_51927 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1683 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle www. prefix when input in /availability API
[PR#100 in getgov-home for additional context](https://github.com/cisagov/getgov-home/pull/100)
Handle the edge case of including "www." in the search input. This is most likely best handled by updating manage.get.gov's availability endpoint to handle the "www." prefix when parsing arguments, similarly to how the API handles the ".gov" suffix.
Per Katherine:
 I envisioned that it would strip out the "www." when checking availability. So the confirmation message for "www.example.gov" would read: "example.gov is not available". Is that what you were thinking?
Example:
 if example.gov was not available to begin with. I would think yes, it strips "www."; then if example.gov is already taken it says “example.gov is not available”. If example.gov is actually available, then entering www.example.gov would result in “example.gov is available”. Basically, have it just ignore a "www." at the start.
_Originally posted by @erinysong in https://github.com/cisagov/manage.get.gov/issues/476#issuecomment-1802870748_
[Slack thread](https://cisa-corp.slack.com/archives/C05BDEA3C11/p1705599697584059)
--- END ISSUE ---
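Before the registrar code below, here is a minimal, self-contained sketch of the normalization being asked for. The function name and the exact order of the two strip steps are assumptions made for illustration; they simply mirror the existing ".gov"-stripping behaviour that DomainHelper already performs.
```python
def normalize_requested_domain(value: str) -> str:
    """Lower-case the input and drop an optional leading 'www.' and
    trailing '.gov' before running the availability check."""
    domain = value.lower().strip()
    if domain.startswith("www."):
        domain = domain[len("www."):]
    if domain.endswith(".gov"):
        domain = domain[: -len(".gov")]
    return domain


# "www.example.gov" and "example.gov" now resolve to the same label,
# so both produce the same "example.gov is (not) available" message.
assert normalize_requested_domain("www.example.gov") == "example"
assert normalize_requested_domain("example.gov") == "example"
```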
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/utility/domain_helper.py`
Content:
```
1 import re
2
3 from django import forms
4 from django.http import JsonResponse
5
6 from api.views import DOMAIN_API_MESSAGES, check_domain_available
7 from registrar.utility import errors
8 from epplibwrapper.errors import RegistryError
9 from registrar.utility.enums import ValidationReturnType
10
11
12 class DomainHelper:
13 """Utility functions and constants for domain names."""
14
15 # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't
16 # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters
17 DOMAIN_REGEX = re.compile(r"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\.[A-Za-z]{2,6}$")
18
19 # a domain can be no longer than 253 characters in total
20 MAX_LENGTH = 253
21
22 @classmethod
23 def string_could_be_domain(cls, domain: str | None) -> bool:
24 """Return True if the string could be a domain name, otherwise False."""
25 if not isinstance(domain, str):
26 return False
27 return bool(cls.DOMAIN_REGEX.match(domain))
28
29 @classmethod
30 def validate(cls, domain: str, blank_ok=False) -> str:
31 """Attempt to determine if a domain name could be requested."""
32
33 # Split into pieces for the linter
34 domain = cls._validate_domain_string(domain, blank_ok)
35
36 try:
37 if not check_domain_available(domain):
38 raise errors.DomainUnavailableError()
39 except RegistryError as err:
40 raise errors.RegistrySystemError() from err
41 return domain
42
43 @staticmethod
44 def _validate_domain_string(domain, blank_ok):
45 """Normalize the domain string, and check its content"""
46 if domain is None:
47 raise errors.BlankValueError()
48
49 if not isinstance(domain, str):
50 raise errors.InvalidDomainError()
51
52 domain = domain.lower().strip()
53
54 if domain == "" and not blank_ok:
55 raise errors.BlankValueError()
56 elif domain == "":
57 # If blank ok is true, just return the domain
58 return domain
59
60 if domain.endswith(".gov"):
61 domain = domain[:-4]
62
63 if "." in domain:
64 raise errors.ExtraDotsError()
65
66 if not DomainHelper.string_could_be_domain(domain + ".gov"):
67 raise errors.InvalidDomainError()
68
69 return domain
70
71 @classmethod
72 def validate_and_handle_errors(cls, domain, return_type, blank_ok=False):
73 """
74 Validates a domain and returns an appropriate response based on the validation result.
75
76 This method uses the `validate` method to validate the domain. If validation fails, it catches the exception,
77 maps it to a corresponding error code, and returns a response based on the `return_type` parameter.
78
79 Args:
80 domain (str): The domain to validate.
81 return_type (ValidationReturnType): Determines the type of response (JSON or form validation error).
82 blank_ok (bool, optional): If True, blank input does not raise an exception. Defaults to False.
83
84 Returns:
85 tuple: The validated domain (or None if validation failed), and the response (success or error).
86 """ # noqa
87
88 # Map each exception to a corresponding error code
89 error_map = {
90 errors.BlankValueError: "required",
91 errors.ExtraDotsError: "extra_dots",
92 errors.DomainUnavailableError: "unavailable",
93 errors.RegistrySystemError: "error",
94 errors.InvalidDomainError: "invalid",
95 }
96
97 validated = None
98 response = None
99
100 try:
101 # Attempt to validate the domain
102 validated = cls.validate(domain, blank_ok)
103
104 # Get a list of each possible exception, and the code to return
105 except tuple(error_map.keys()) as error:
106 # If an error is caught, get its type
107 error_type = type(error)
108
109 # Generate the response based on the error code and return type
110 response = DomainHelper._return_form_error_or_json_response(return_type, code=error_map.get(error_type))
111 else:
112 # For form validation, we do not need to display the success message
113 if return_type != ValidationReturnType.FORM_VALIDATION_ERROR:
114 response = DomainHelper._return_form_error_or_json_response(return_type, code="success", available=True)
115
116 # Return the validated domain and the response (either error or success)
117 return (validated, response)
118
119 @staticmethod
120 def _return_form_error_or_json_response(return_type: ValidationReturnType, code, available=False):
121 """
122 Returns an error response based on the `return_type`.
123
124 If `return_type` is `FORM_VALIDATION_ERROR`, raises a form validation error.
125 If `return_type` is `JSON_RESPONSE`, returns a JSON response with 'available', 'code', and 'message' fields.
126 If `return_type` is neither, raises a ValueError.
127
128 Args:
129 return_type (ValidationReturnType): The type of error response.
130 code (str): The error code for the error message.
131 available (bool, optional): Availability, only used for JSON responses. Defaults to False.
132
133 Returns:
134 A JSON response or a form validation error.
135
136 Raises:
137 ValueError: If `return_type` is neither `FORM_VALIDATION_ERROR` nor `JSON_RESPONSE`.
138 """ # noqa
139 match return_type:
140 case ValidationReturnType.FORM_VALIDATION_ERROR:
141 raise forms.ValidationError(DOMAIN_API_MESSAGES[code], code=code)
142 case ValidationReturnType.JSON_RESPONSE:
143 return JsonResponse({"available": available, "code": code, "message": DOMAIN_API_MESSAGES[code]})
144 case _:
145 raise ValueError("Invalid return type specified")
146
147 @classmethod
148 def sld(cls, domain: str):
149 """
150 Get the second level domain. Example: `gsa.gov` -> `gsa`.
151
152 If no TLD is present, returns the original string.
153 """
154 return domain.split(".")[0]
155
156 @classmethod
157 def tld(cls, domain: str):
158 """Get the top level domain. Example: `gsa.gov` -> `gov`."""
159 parts = domain.rsplit(".")
160 return parts[-1] if len(parts) > 1 else ""
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/models/utility/domain_helper.py b/src/registrar/models/utility/domain_helper.py
--- a/src/registrar/models/utility/domain_helper.py
+++ b/src/registrar/models/utility/domain_helper.py
@@ -57,6 +57,9 @@
# If blank ok is true, just return the domain
return domain
+ if domain.startswith("www."):
+ domain = domain[4:]
+
if domain.endswith(".gov"):
domain = domain[:-4]
| {"golden_diff": "diff --git a/src/registrar/models/utility/domain_helper.py b/src/registrar/models/utility/domain_helper.py\n--- a/src/registrar/models/utility/domain_helper.py\n+++ b/src/registrar/models/utility/domain_helper.py\n@@ -57,6 +57,9 @@\n # If blank ok is true, just return the domain\n return domain\n \n+ if domain.startswith(\"www.\"):\n+ domain = domain[4:]\n+\n if domain.endswith(\".gov\"):\n domain = domain[:-4]\n", "issue": "Handle www. prefix when input in /availability API\n[PR#100 in getgov-home for additional context\r\n](https://github.com/cisagov/getgov-home/pull/100)\r\nHandle edge case of including [www](http://www/). in the search input. This is most likely best handled by updating the manage.get.gov's availability endpoint to handle the [www](http://www/). prefix when parsing arguments, similarly to how the API handles the .gov suffix.\r\n\r\nPer Katherine:\r\n I envisioned that it would strip out the \"www.\" when checking availability. So the confirmation message for \"[www.example.gov](http://www.example.gov/)\" would read: \"[example.gov](http://example.gov/) is not available\" Is that what you were thinking,\r\n\r\nExample: \r\n if [example.gov](http://example.gov/) was not available to begin with. I would think yes it strips www. then if [example.gov](http://example.gov/) is already taken it says \u201c[example.gov](http://example.gov/) is not available\u201d. If [example.gov](http://example.gov/) is actually available then entering [www.example.gov](http://www.example.gov/) would result in \u201c[example.gov](http://example.gov/) is available\u201d. Basically have it just ignore a www. at the start.\r\n\r\n_Originally posted by @erinysong in https://github.com/cisagov/manage.get.gov/issues/476#issuecomment-1802870748_\r\n \r\n[Slack thread](https://cisa-corp.slack.com/archives/C05BDEA3C11/p1705599697584059)\n", "before_files": [{"content": "import re\n\nfrom django import forms\nfrom django.http import JsonResponse\n\nfrom api.views import DOMAIN_API_MESSAGES, check_domain_available\nfrom registrar.utility import errors\nfrom epplibwrapper.errors import RegistryError\nfrom registrar.utility.enums import ValidationReturnType\n\n\nclass DomainHelper:\n \"\"\"Utility functions and constants for domain names.\"\"\"\n\n # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n DOMAIN_REGEX = re.compile(r\"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.[A-Za-z]{2,6}$\")\n\n # a domain can be no longer than 253 characters in total\n MAX_LENGTH = 253\n\n @classmethod\n def string_could_be_domain(cls, domain: str | None) -> bool:\n \"\"\"Return True if the string could be a domain name, otherwise False.\"\"\"\n if not isinstance(domain, str):\n return False\n return bool(cls.DOMAIN_REGEX.match(domain))\n\n @classmethod\n def validate(cls, domain: str, blank_ok=False) -> str:\n \"\"\"Attempt to determine if a domain name could be requested.\"\"\"\n\n # Split into pieces for the linter\n domain = cls._validate_domain_string(domain, blank_ok)\n\n try:\n if not check_domain_available(domain):\n raise errors.DomainUnavailableError()\n except RegistryError as err:\n raise errors.RegistrySystemError() from err\n return domain\n\n @staticmethod\n def _validate_domain_string(domain, blank_ok):\n \"\"\"Normalize the domain string, and check its content\"\"\"\n if domain is None:\n raise errors.BlankValueError()\n\n if not isinstance(domain, str):\n raise errors.InvalidDomainError()\n\n domain 
= domain.lower().strip()\n\n if domain == \"\" and not blank_ok:\n raise errors.BlankValueError()\n elif domain == \"\":\n # If blank ok is true, just return the domain\n return domain\n\n if domain.endswith(\".gov\"):\n domain = domain[:-4]\n\n if \".\" in domain:\n raise errors.ExtraDotsError()\n\n if not DomainHelper.string_could_be_domain(domain + \".gov\"):\n raise errors.InvalidDomainError()\n\n return domain\n\n @classmethod\n def validate_and_handle_errors(cls, domain, return_type, blank_ok=False):\n \"\"\"\n Validates a domain and returns an appropriate response based on the validation result.\n\n This method uses the `validate` method to validate the domain. If validation fails, it catches the exception,\n maps it to a corresponding error code, and returns a response based on the `return_type` parameter.\n\n Args:\n domain (str): The domain to validate.\n return_type (ValidationReturnType): Determines the type of response (JSON or form validation error).\n blank_ok (bool, optional): If True, blank input does not raise an exception. Defaults to False.\n\n Returns:\n tuple: The validated domain (or None if validation failed), and the response (success or error).\n \"\"\" # noqa\n\n # Map each exception to a corresponding error code\n error_map = {\n errors.BlankValueError: \"required\",\n errors.ExtraDotsError: \"extra_dots\",\n errors.DomainUnavailableError: \"unavailable\",\n errors.RegistrySystemError: \"error\",\n errors.InvalidDomainError: \"invalid\",\n }\n\n validated = None\n response = None\n\n try:\n # Attempt to validate the domain\n validated = cls.validate(domain, blank_ok)\n\n # Get a list of each possible exception, and the code to return\n except tuple(error_map.keys()) as error:\n # If an error is caught, get its type\n error_type = type(error)\n\n # Generate the response based on the error code and return type\n response = DomainHelper._return_form_error_or_json_response(return_type, code=error_map.get(error_type))\n else:\n # For form validation, we do not need to display the success message\n if return_type != ValidationReturnType.FORM_VALIDATION_ERROR:\n response = DomainHelper._return_form_error_or_json_response(return_type, code=\"success\", available=True)\n\n # Return the validated domain and the response (either error or success)\n return (validated, response)\n\n @staticmethod\n def _return_form_error_or_json_response(return_type: ValidationReturnType, code, available=False):\n \"\"\"\n Returns an error response based on the `return_type`.\n\n If `return_type` is `FORM_VALIDATION_ERROR`, raises a form validation error.\n If `return_type` is `JSON_RESPONSE`, returns a JSON response with 'available', 'code', and 'message' fields.\n If `return_type` is neither, raises a ValueError.\n\n Args:\n return_type (ValidationReturnType): The type of error response.\n code (str): The error code for the error message.\n available (bool, optional): Availability, only used for JSON responses. 
Defaults to False.\n\n Returns:\n A JSON response or a form validation error.\n\n Raises:\n ValueError: If `return_type` is neither `FORM_VALIDATION_ERROR` nor `JSON_RESPONSE`.\n \"\"\" # noqa\n match return_type:\n case ValidationReturnType.FORM_VALIDATION_ERROR:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[code], code=code)\n case ValidationReturnType.JSON_RESPONSE:\n return JsonResponse({\"available\": available, \"code\": code, \"message\": DOMAIN_API_MESSAGES[code]})\n case _:\n raise ValueError(\"Invalid return type specified\")\n\n @classmethod\n def sld(cls, domain: str):\n \"\"\"\n Get the second level domain. Example: `gsa.gov` -> `gsa`.\n\n If no TLD is present, returns the original string.\n \"\"\"\n return domain.split(\".\")[0]\n\n @classmethod\n def tld(cls, domain: str):\n \"\"\"Get the top level domain. Example: `gsa.gov` -> `gov`.\"\"\"\n parts = domain.rsplit(\".\")\n return parts[-1] if len(parts) > 1 else \"\"\n", "path": "src/registrar/models/utility/domain_helper.py"}], "after_files": [{"content": "import re\n\nfrom django import forms\nfrom django.http import JsonResponse\n\nfrom api.views import DOMAIN_API_MESSAGES, check_domain_available\nfrom registrar.utility import errors\nfrom epplibwrapper.errors import RegistryError\nfrom registrar.utility.enums import ValidationReturnType\n\n\nclass DomainHelper:\n \"\"\"Utility functions and constants for domain names.\"\"\"\n\n # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n DOMAIN_REGEX = re.compile(r\"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.[A-Za-z]{2,6}$\")\n\n # a domain can be no longer than 253 characters in total\n MAX_LENGTH = 253\n\n @classmethod\n def string_could_be_domain(cls, domain: str | None) -> bool:\n \"\"\"Return True if the string could be a domain name, otherwise False.\"\"\"\n if not isinstance(domain, str):\n return False\n return bool(cls.DOMAIN_REGEX.match(domain))\n\n @classmethod\n def validate(cls, domain: str, blank_ok=False) -> str:\n \"\"\"Attempt to determine if a domain name could be requested.\"\"\"\n\n # Split into pieces for the linter\n domain = cls._validate_domain_string(domain, blank_ok)\n\n try:\n if not check_domain_available(domain):\n raise errors.DomainUnavailableError()\n except RegistryError as err:\n raise errors.RegistrySystemError() from err\n return domain\n\n @staticmethod\n def _validate_domain_string(domain, blank_ok):\n \"\"\"Normalize the domain string, and check its content\"\"\"\n if domain is None:\n raise errors.BlankValueError()\n\n if not isinstance(domain, str):\n raise errors.InvalidDomainError()\n\n domain = domain.lower().strip()\n\n if domain == \"\" and not blank_ok:\n raise errors.BlankValueError()\n elif domain == \"\":\n # If blank ok is true, just return the domain\n return domain\n\n if domain.startswith(\"www.\"):\n domain = domain[4:]\n\n if domain.endswith(\".gov\"):\n domain = domain[:-4]\n\n if \".\" in domain:\n raise errors.ExtraDotsError()\n\n if not DomainHelper.string_could_be_domain(domain + \".gov\"):\n raise errors.InvalidDomainError()\n\n return domain\n\n @classmethod\n def validate_and_handle_errors(cls, domain, return_type, blank_ok=False):\n \"\"\"\n Validates a domain and returns an appropriate response based on the validation result.\n\n This method uses the `validate` method to validate the domain. 
If validation fails, it catches the exception,\n maps it to a corresponding error code, and returns a response based on the `return_type` parameter.\n\n Args:\n domain (str): The domain to validate.\n return_type (ValidationReturnType): Determines the type of response (JSON or form validation error).\n blank_ok (bool, optional): If True, blank input does not raise an exception. Defaults to False.\n\n Returns:\n tuple: The validated domain (or None if validation failed), and the response (success or error).\n \"\"\" # noqa\n\n # Map each exception to a corresponding error code\n error_map = {\n errors.BlankValueError: \"required\",\n errors.ExtraDotsError: \"extra_dots\",\n errors.DomainUnavailableError: \"unavailable\",\n errors.RegistrySystemError: \"error\",\n errors.InvalidDomainError: \"invalid\",\n }\n\n validated = None\n response = None\n\n try:\n # Attempt to validate the domain\n validated = cls.validate(domain, blank_ok)\n\n # Get a list of each possible exception, and the code to return\n except tuple(error_map.keys()) as error:\n # If an error is caught, get its type\n error_type = type(error)\n\n # Generate the response based on the error code and return type\n response = DomainHelper._return_form_error_or_json_response(return_type, code=error_map.get(error_type))\n else:\n # For form validation, we do not need to display the success message\n if return_type != ValidationReturnType.FORM_VALIDATION_ERROR:\n response = DomainHelper._return_form_error_or_json_response(return_type, code=\"success\", available=True)\n\n # Return the validated domain and the response (either error or success)\n return (validated, response)\n\n @staticmethod\n def _return_form_error_or_json_response(return_type: ValidationReturnType, code, available=False):\n \"\"\"\n Returns an error response based on the `return_type`.\n\n If `return_type` is `FORM_VALIDATION_ERROR`, raises a form validation error.\n If `return_type` is `JSON_RESPONSE`, returns a JSON response with 'available', 'code', and 'message' fields.\n If `return_type` is neither, raises a ValueError.\n\n Args:\n return_type (ValidationReturnType): The type of error response.\n code (str): The error code for the error message.\n available (bool, optional): Availability, only used for JSON responses. Defaults to False.\n\n Returns:\n A JSON response or a form validation error.\n\n Raises:\n ValueError: If `return_type` is neither `FORM_VALIDATION_ERROR` nor `JSON_RESPONSE`.\n \"\"\" # noqa\n match return_type:\n case ValidationReturnType.FORM_VALIDATION_ERROR:\n raise forms.ValidationError(DOMAIN_API_MESSAGES[code], code=code)\n case ValidationReturnType.JSON_RESPONSE:\n return JsonResponse({\"available\": available, \"code\": code, \"message\": DOMAIN_API_MESSAGES[code]})\n case _:\n raise ValueError(\"Invalid return type specified\")\n\n @classmethod\n def sld(cls, domain: str):\n \"\"\"\n Get the second level domain. Example: `gsa.gov` -> `gsa`.\n\n If no TLD is present, returns the original string.\n \"\"\"\n return domain.split(\".\")[0]\n\n @classmethod\n def tld(cls, domain: str):\n \"\"\"Get the top level domain. Example: `gsa.gov` -> `gov`.\"\"\"\n parts = domain.rsplit(\".\")\n return parts[-1] if len(parts) > 1 else \"\"\n", "path": "src/registrar/models/utility/domain_helper.py"}]} | 2,318 | 105 |
gh_patches_debug_1182 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-1049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
efs tag support
I am finding that searching for tagging of EFS resources does not consistently report the correct results. It did find an EFS that was incorrectly tagged, but after it was corrected it continues to report the same resource. I use the same filter for other resource types and do not see this behavior.
```
- name: efs-tag-compliance
resource: efs
description:
Notify if an EFS does not comply with tagging best practices.
mode:
type: periodic
schedule: "rate(24 hours)"
role: arn:aws:iam::MYACCOUNT:role/cloud-custodian
filters:
- or:
- "tag:CostCenter": absent
- "tag:POC": absent
- "tag:Service": absent
- "tag:Name": absent
...
```
--- END ISSUE ---
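For context on why the filter can look stale: Cloud Custodian enumerates file systems with DescribeFileSystems, and the tag data has to be attached through a separate per-resource DescribeTags call (at least in the EFS API of that era). The boto3 sketch below shows that enrichment in isolation; it assumes default credentials and region, omits pagination, and is not Custodian's internal query machinery. Custodian itself expresses the same idea declaratively with a detail_spec on the resource type, which is what the accepted patch for this record adds.
```python
import boto3


def efs_with_tags(region_name: str = "us-east-1") -> list:
    """Illustrative: list EFS file systems and attach their tags."""
    client = boto3.client("efs", region_name=region_name)
    filesystems = client.describe_file_systems()["FileSystems"]
    for fs in filesystems:
        # Tags live behind a separate API call keyed by FileSystemId;
        # pagination is omitted here for brevity.
        fs["Tags"] = client.describe_tags(
            FileSystemId=fs["FileSystemId"])["Tags"]
    return filesystems
```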
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/efs.py`
Content:
```
1 # Copyright 2016 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from c7n.actions import Action
15 from c7n.manager import resources
16 from c7n.query import QueryResourceManager
17 from c7n.utils import local_session, type_schema, get_retry
18
19
20 @resources.register('efs')
21 class ElasticFileSystem(QueryResourceManager):
22
23 class resource_type(object):
24 service = 'efs'
25 enum_spec = ('describe_file_systems', 'FileSystems', None)
26 id = 'FileSystemId'
27 name = 'Name'
28 date = 'CreationTime'
29 dimension = None
30
31
32 @ElasticFileSystem.action_registry.register('delete')
33 class Delete(Action):
34
35 schema = type_schema('delete')
36 permissions = ('efs:DescribeMountTargets',
37 'efs:DeleteMountTargets',
38 'efs:DeleteFileSystem')
39
40 def process(self, resources):
41 client = local_session(self.manager.session_factory).client('efs')
42 self.unmount_filesystems(resources)
43 retry = get_retry(('FileSystemInUse',), 12)
44 for r in resources:
45 retry(client.delete_file_system, FileSystemId=r['FileSystemId'])
46
47 def unmount_filesystems(self, resources):
48 client = local_session(self.manager.session_factory).client('efs')
49 for r in resources:
50 if not r['NumberOfMountTargets']:
51 continue
52 for t in client.describe_mount_targets(
53 FileSystemId=r['FileSystemId'])['MountTargets']:
54 client.delete_mount_target(MountTargetId=t['MountTargetId'])
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/efs.py b/c7n/resources/efs.py
--- a/c7n/resources/efs.py
+++ b/c7n/resources/efs.py
@@ -27,6 +27,7 @@
name = 'Name'
date = 'CreationTime'
dimension = None
+ detail_spec = ('describe_tags', 'FileSystemId', 'FileSystemId', None)
@ElasticFileSystem.action_registry.register('delete')
| {"golden_diff": "diff --git a/c7n/resources/efs.py b/c7n/resources/efs.py\n--- a/c7n/resources/efs.py\n+++ b/c7n/resources/efs.py\n@@ -27,6 +27,7 @@\n name = 'Name'\n date = 'CreationTime'\n dimension = None\n+ detail_spec = ('describe_tags', 'FileSystemId', 'FileSystemId', None)\n \n \n @ElasticFileSystem.action_registry.register('delete')\n", "issue": "efs tag support\nI am finding that searching for tagging of EFS resources does not consistently report the correct results. It did find an EFS that was incorrectly tagged, but after it was corrected it continues to report the same resource. I use the same filter for other resource types and do not see this behavior.\r\n\r\n```\r\n- name: efs-tag-compliance\r\n resource: efs\r\n description:\r\n Notify if an EFS does not comply with tagging best practices.\r\n mode:\r\n type: periodic\r\n schedule: \"rate(24 hours)\"\r\n role: arn:aws:iam::MYACCOUNT:role/cloud-custodian\r\n filters:\r\n - or:\r\n - \"tag:CostCenter\": absent\r\n - \"tag:POC\": absent\r\n - \"tag:Service\": absent\r\n - \"tag:Name\": absent\r\n...\r\n```\n", "before_files": [{"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema, get_retry\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(object):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = None\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('efs:DescribeMountTargets',\n 'efs:DeleteMountTargets',\n 'efs:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n", "path": "c7n/resources/efs.py"}], "after_files": [{"content": "# Copyright 2016 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# 
See the License for the specific language governing permissions and\n# limitations under the License.\nfrom c7n.actions import Action\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema, get_retry\n\n\[email protected]('efs')\nclass ElasticFileSystem(QueryResourceManager):\n\n class resource_type(object):\n service = 'efs'\n enum_spec = ('describe_file_systems', 'FileSystems', None)\n id = 'FileSystemId'\n name = 'Name'\n date = 'CreationTime'\n dimension = None\n detail_spec = ('describe_tags', 'FileSystemId', 'FileSystemId', None)\n\n\[email protected]_registry.register('delete')\nclass Delete(Action):\n\n schema = type_schema('delete')\n permissions = ('efs:DescribeMountTargets',\n 'efs:DeleteMountTargets',\n 'efs:DeleteFileSystem')\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n self.unmount_filesystems(resources)\n retry = get_retry(('FileSystemInUse',), 12)\n for r in resources:\n retry(client.delete_file_system, FileSystemId=r['FileSystemId'])\n\n def unmount_filesystems(self, resources):\n client = local_session(self.manager.session_factory).client('efs')\n for r in resources:\n if not r['NumberOfMountTargets']:\n continue\n for t in client.describe_mount_targets(\n FileSystemId=r['FileSystemId'])['MountTargets']:\n client.delete_mount_target(MountTargetId=t['MountTargetId'])\n", "path": "c7n/resources/efs.py"}]} | 981 | 100 |
gh_patches_debug_56095 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: colossalai 0.3.3 + torch 2.0.1 + baichuan-2 7b raises NotImplementedError when saving lr_scheduler during training
### 🐛 Describe the bug
When training with colossalai 0.3.3 + torch 2.0.1 + baichuan-2 7b, saving the lr_scheduler causes colossalai/nn/lr_scheduler/delayed.py to raise a NotImplementedError.
In [25]: lr_scheduler
Out[25]: <colossalai.nn.lr_scheduler.cosine.CosineAnnealingWarmupLR at 0x7f01cd616e00>
In [26]: booster.save_lr_scheduler(lr_scheduler, "/data/checkpoint/lr_scheduler")
```
in <module>:1
python3.10/site-packages/colossalai/booster/booster.py:308 in
save_lr_scheduler
305 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
306 checkpoint (str): Path to the checkpoint. It must be a local file path.
307 """
❱ 308 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
309
310 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str) -> None:
311 """Load lr scheduler from checkpoint.
python3.10/site-packages/colossalai/booster/plugin/gemini_plugin.py:225
in save_lr_scheduler
222 Save model to checkpoint but only on master process.
223 """
224 if self.coordinator.is_master():
❱ 225 super().save_lr_scheduler(lr_scheduler, checkpoint)
226
227
228 class GeminiPlugin(DPPluginBase):
python3.10/site-packages/colossalai/checkpoint_io/checkpoint_io_base.py:
318 in save_lr_scheduler
315 lr_scheduler (LRScheduler): lr scheduler to be saved.
316 checkpoint: checkpoint path. The checkpoint path can only be a file path.
317 """
❱ 318 torch.save(lr_scheduler.state_dict(), checkpoint)
319
320 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
321 """
python3.10/site-packages/colossalai/nn/lr_scheduler/delayed.py:93 in
state_dict
90 state_dict["after_scheduler_dict"] = state_dict["after_scheduler"].state_dic
91 del state_dict["after_scheduler"]
92 else:
❱ 93 raise NotImplementedError()
94 return state_dict
95
96 def get_lr(self):
```
Digging further into what the lr_scheduler object holds:
```
state_dict = {key: value for key, value in lr_scheduler.__dict__.items() if key not in "optimizer"}
# =>
{
'warmup_epochs': 2000,
'after_scheduler': <torch.optim.lr_scheduler.CosineAnnealingLR at 0x7f01cd6173a0>,
'finished': False,
'base_lrs': [0.0003],
'last_epoch': 1,
'verbose': False,
'_step_count': 2,
'_get_lr_called_within_step': False,
'_last_lr': [3e-07]
}
```
- Here, after_scheduler is an instance of torch.optim.lr_scheduler.CosineAnnealingLR, and torch.optim.lr_scheduler.CosineAnnealingLR inherits from LRScheduler, so the parent class of after_scheduler is LRScheduler
- _LRScheduler itself inherits from LRScheduler
- Yet [when saving the lr scheduler (delayed.py)](https://github.com/hpcaitech/ColossalAI/blob/822051d8884a46d4d8626330e21adfd6427c99a0/colossalai/nn/lr_scheduler/delayed.py#L88), the check is `isinstance(state_dict['after_scheduler'], _LRScheduler)`
```
from torch.optim.lr_scheduler import _LRScheduler, LRScheduler
isinstance(state_dict['after_scheduler'], LRScheduler)
# => True
isinstance(state_dict['after_scheduler'], _LRScheduler)
# => False
```
**Given that, does it mean `LRScheduler` should be used instead of `_LRScheduler`?**
Note: baichuan-2 depends on torch 2.0+ and cannot be downgraded below 2.0 (with torch 1.13 it raises TypeError: sdp_kernel() got an unexpected keyword argument 'enable_mem_efficient').
### Environment
- colossalai 0.3.3
- torch 2.0.1
- baichuan-2 7b
[tensor] fix some unittests
--- END ISSUE ---
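In torch 2.0 the public base class was renamed to LRScheduler, and _LRScheduler survives only for backwards compatibility and is no longer in the method resolution order of the built-in schedulers, which is exactly what the isinstance experiment in the issue shows. One compatibility pattern, and the one the patch at the end of this record adopts, is a version-aware import so the rest of the module can keep using the old symbol:
```python
import torch
from packaging.version import Version

if Version(torch.__version__) >= Version("2.0.0"):
    # torch >= 2.0: alias the new public name to the old symbol so
    # isinstance(..., _LRScheduler) keeps matching CosineAnnealingLR etc.
    from torch.optim.lr_scheduler import LRScheduler as _LRScheduler
else:
    # torch < 2.0: the underscored name is still the real base class.
    from torch.optim.lr_scheduler import _LRScheduler
```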
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/nn/lr_scheduler/delayed.py`
Content:
```
1 from torch.optim.lr_scheduler import _LRScheduler
2
3
4 class _enable_get_lr_call:
5 def __init__(self, o):
6 self.o = o
7
8 def __enter__(self):
9 self.o._get_lr_called_within_step = True
10 return self
11
12 def __exit__(self, type, value, traceback):
13 self.o._get_lr_called_within_step = False
14
15
16 class DelayerScheduler(_LRScheduler):
17 """Starts with a flat lr schedule until it reaches N epochs then applies
18 the specific scheduler (For example: ReduceLROnPlateau)
19
20 Args:
21 optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
22 delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.
23 after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
24 last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
25 the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.
26 """
27
28 def __init__(self, optimizer, delay_epochs, after_scheduler, last_epoch=-1):
29 if delay_epochs < 0:
30 raise ValueError(f"delay_epochs must >= 0, got {delay_epochs}")
31 self.delay_epochs = delay_epochs
32 self.after_scheduler = after_scheduler
33 self.finished = False
34 super().__init__(optimizer, last_epoch)
35
36 def state_dict(self):
37 state_dict = {key: value for key, value in self.__dict__.items() if key not in "optimizer"}
38 if isinstance(state_dict["after_scheduler"], _LRScheduler):
39 state_dict["after_scheduler_type"] = type(state_dict["after_scheduler"]).__name__
40 state_dict["after_scheduler_dict"] = state_dict["after_scheduler"].state_dict()
41 del state_dict["after_scheduler"]
42 else:
43 raise NotImplementedError()
44 return state_dict
45
46 def get_lr(self):
47 if self.last_epoch >= self.delay_epochs:
48 if not self.finished:
49 self.after_scheduler.base_lrs = self.base_lrs
50 self.finished = True
51 with _enable_get_lr_call(self.after_scheduler):
52 return self.after_scheduler.get_lr()
53
54 return self.base_lrs
55
56 def step(self, epoch=None):
57 if self.finished:
58 if epoch is None:
59 self.after_scheduler.step(None)
60 self._last_lr = self.after_scheduler.get_last_lr()
61 else:
62 self.after_scheduler.step(epoch - self.delay_epochs)
63 self._last_lr = self.after_scheduler.get_last_lr()
64 else:
65 return super(DelayerScheduler, self).step(epoch)
66
67
68 class WarmupScheduler(_LRScheduler):
69 """Starts with a linear warmup lr schedule until it reaches N epochs then applies
70 the specific scheduler (For example: ReduceLROnPlateau).
71
72 Args:
73 optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
74 warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.
75 after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
76 last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
77 the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.
78 """
79
80 def __init__(self, optimizer, warmup_epochs, after_scheduler, last_epoch=-1):
81 self.warmup_epochs = int(warmup_epochs)
82 self.after_scheduler = after_scheduler
83 self.finished = False
84 super().__init__(optimizer, last_epoch)
85
86 def state_dict(self):
87 state_dict = {key: value for key, value in self.__dict__.items() if key not in "optimizer"}
88 if isinstance(state_dict["after_scheduler"], _LRScheduler):
89 state_dict["after_scheduler_type"] = type(state_dict["after_scheduler"]).__name__
90 state_dict["after_scheduler_dict"] = state_dict["after_scheduler"].state_dict()
91 del state_dict["after_scheduler"]
92 else:
93 raise NotImplementedError()
94 return state_dict
95
96 def get_lr(self):
97 if self.last_epoch >= self.warmup_epochs:
98 if not self.finished:
99 self.after_scheduler.base_lrs = self.base_lrs
100 self.finished = True
101 return self.after_scheduler.get_lr()
102
103 return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]
104
105 def step(self, epoch=None):
106 if self.finished:
107 if epoch is None:
108 self.after_scheduler.step(None)
109 self._last_lr = self.after_scheduler.get_last_lr()
110 else:
111 self.after_scheduler.step(epoch - self.warmup_epochs)
112 self._last_lr = self.after_scheduler.get_last_lr()
113 else:
114 return super().step(epoch)
115
116
117 class WarmupDelayerScheduler(_LRScheduler):
118 """Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule
119 until it reaches M epochs then applies the specific scheduler (For example: ReduceLROnPlateau).
120
121 Args:
122 optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
123 warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.
124 delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.
125 after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.
126 last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
127 the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.
128 """
129
130 def __init__(self, optimizer, warmup_epochs, delay_epochs, after_scheduler, last_epoch=-1):
131 if delay_epochs < 0:
132 raise ValueError(f"delay_epochs must >= 0, got {delay_epochs}")
133 if warmup_epochs < 0:
134 raise ValueError(f"warmup_epochs must >= 0, got {warmup_epochs}")
135 self.warmup_epochs = warmup_epochs
136 self.delay_epochs = delay_epochs
137 self.after_scheduler = after_scheduler
138 self.finished = False
139 super().__init__(optimizer, last_epoch)
140
141 def state_dict(self):
142 state_dict = {key: value for key, value in self.__dict__.items() if key not in "optimizer"}
143 if isinstance(state_dict["after_scheduler"], _LRScheduler):
144 state_dict["after_scheduler_type"] = type(state_dict["after_scheduler"]).__name__
145 state_dict["after_scheduler_dict"] = state_dict["after_scheduler"].state_dict()
146 del state_dict["after_scheduler"]
147 else:
148 raise NotImplementedError()
149 return state_dict
150
151 def get_lr(self):
152 if self.last_epoch >= self.warmup_epochs + self.delay_epochs:
153 if not self.finished:
154 self.after_scheduler.base_lrs = self.base_lrs
155 # reset lr to base_lr
156 for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):
157 group["lr"] = base_lr
158 self.finished = True
159 with _enable_get_lr_call(self.after_scheduler):
160 return self.after_scheduler.get_lr()
161 elif self.last_epoch >= self.warmup_epochs:
162 return self.base_lrs
163
164 return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]
165
166 def step(self, epoch=None):
167 if self.finished:
168 if epoch is None:
169 self.after_scheduler.step(None)
170 self._last_lr = self.after_scheduler.get_last_lr()
171 else:
172 self.after_scheduler.step(epoch - self.warmup_epochs)
173 self._last_lr = self.after_scheduler.get_last_lr()
174 else:
175 return super().step(epoch)
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/nn/lr_scheduler/delayed.py b/colossalai/nn/lr_scheduler/delayed.py
--- a/colossalai/nn/lr_scheduler/delayed.py
+++ b/colossalai/nn/lr_scheduler/delayed.py
@@ -1,4 +1,10 @@
-from torch.optim.lr_scheduler import _LRScheduler
+import torch
+from packaging.version import Version
+
+if Version(torch.__version__) >= Version("2.0.0"):
+ from torch.optim.lr_scheduler import LRScheduler as _LRScheduler
+else:
+ from torch.optim.lr_scheduler import _LRScheduler
class _enable_get_lr_call:
| {"golden_diff": "diff --git a/colossalai/nn/lr_scheduler/delayed.py b/colossalai/nn/lr_scheduler/delayed.py\n--- a/colossalai/nn/lr_scheduler/delayed.py\n+++ b/colossalai/nn/lr_scheduler/delayed.py\n@@ -1,4 +1,10 @@\n-from torch.optim.lr_scheduler import _LRScheduler\n+import torch\n+from packaging.version import Version\n+\n+if Version(torch.__version__) >= Version(\"2.0.0\"):\n+ from torch.optim.lr_scheduler import LRScheduler as _LRScheduler\n+else:\n+ from torch.optim.lr_scheduler import _LRScheduler\n \n \n class _enable_get_lr_call:\n", "issue": "[BUG]: colossalai 0.3.3 + torch 2.0.1 + baichuan-2 7b \u8bad\u7ec3\u4fdd\u5b58 lr_scheduler \u65f6\u4f1a\u62a5 NotImplementedError \u9519\n### \ud83d\udc1b Describe the bug\r\n\r\n\u7528 colossalai 0.3.3 + torch 2.0.1 + baichuan-2 7b \u8bad\u7ec3\u4fdd\u5b58 lr_scheduler \u65f6 colossalai/nn/lr_scheduler/delayed.py \u4f1a\u62a5 NotImplementedError \u9519\u3002\r\n\r\nIn [25]: lr_scheduler\r\nOut[25]: <[colossalai.nn.lr](http://colossalai.nn.lr/)_scheduler.cosine.CosineAnnealingWarmupLR at 0x7f01cd616e00>\r\nIn [26]: booster.save_lr_scheduler(lr_scheduler, \"/data/checkpoint/lr_scheduler\")\r\n\r\n```\r\n in <module>:1 \r\n \r\n python3.10/site-packages/colossalai/booster/booster.py:308 in \r\n save_lr_scheduler \r\n \r\n 305 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster. \r\n 306 checkpoint (str): Path to the checkpoint. It must be a local file path. \r\n 307 \"\"\" \r\n \u2771 308 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint) \r\n 309 \r\n 310 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str) -> None: \r\n 311 \"\"\"Load lr scheduler from checkpoint. \r\n \r\n python3.10/site-packages/colossalai/booster/plugin/gemini_plugin.py:225 \r\n in save_lr_scheduler \r\n \r\n 222 Save model to checkpoint but only on master process. \r\n 223 \"\"\" \r\n 224 if self.coordinator.is_master(): \r\n \u2771 225 super().save_lr_scheduler(lr_scheduler, checkpoint) \r\n 226 \r\n 227 \r\n 228 class GeminiPlugin(DPPluginBase): \r\n \r\n python3.10/site-packages/colossalai/checkpoint_io/checkpoint_io_base.py: \r\n 318 in save_lr_scheduler \r\n \r\n 315 lr_scheduler (LRScheduler): lr scheduler to be saved. \r\n 316 checkpoint: checkpoint path. The checkpoint path can only be a file path. 
\r\n 317 \"\"\" \r\n \u2771 318 torch.save(lr_scheduler.state_dict(), checkpoint) \r\n 319 \r\n 320 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): \r\n 321 \"\"\" \r\n \r\n python3.10/site-packages/colossalai/nn/lr_scheduler/delayed.py:93 in \r\n state_dict \r\n \r\n 90 state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dic \r\n 91 del state_dict[\"after_scheduler\"] \r\n 92 else: \r\n \u2771 93 raise NotImplementedError() \r\n 94 return state_dict \r\n 95 \r\n 96 def get_lr(self):\r\n```\r\n\r\n\u8fdb\u4e00\u6b65\u5206\u6790 lr_scheduler \u91cc\u7684\u4fe1\u606f\r\n```\r\nstate_dict = {key: value for key, value in lr_scheduler.__dict__.items() if key not in \"optimizer\"}\r\n\r\n# =>\r\n{\r\n 'warmup_epochs': 2000,\r\n 'after_scheduler': <torch.optim.lr_scheduler.CosineAnnealingLR at 0x7f01cd6173a0>,\r\n 'finished': False,\r\n 'base_lrs': [0.0003],\r\n 'last_epoch': 1,\r\n 'verbose': False,\r\n '_step_count': 2,\r\n '_get_lr_called_within_step': False,\r\n '_last_lr': [3e-07]\r\n}\r\n```\r\n\r\n- \u5176\u4e2d after_scheduler \u662f torch.optim.lr_scheduler.CosineAnnealingLR \u7684\u5b9e\u4f8b\uff0c\u800c torch.optim.lr_scheduler.CosineAnnealingLR \u662f\u7ee7\u627f\u7684 LRScheduler\uff0c\u90a3\u4e48 after_scheduler \u7684\u7236\u7c7b\u662f LRScheduler\r\n\r\n- _LRScheduler \u662f\u7ee7\u627f\u4e86 LRScheduler\r\n\r\n- \u800c\u5728 [save lr scheduler \u65f6\uff08delayed.py) \u4e2d](https://github.com/hpcaitech/ColossalAI/blob/822051d8884a46d4d8626330e21adfd6427c99a0/colossalai/nn/lr_scheduler/delayed.py#L88)\uff0c\u662f `isinstance(state_dict['after_scheduler'], _LRScheduler)`\r\n\r\n```\r\nfrom torch.optim.lr_scheduler import _LRScheduler, LRScheduler\r\n\r\nisinstance(state_dict['after_scheduler'], LRScheduler)\r\n\r\n# => True\r\n\r\nisinstance(state_dict['after_scheduler'], _LRScheduler)\r\n\r\n# => False\r\n\r\n```\r\n\r\n**\u90a3\u8fd9\u6837\uff0c\u662f\u5426\u8bf4\u660e \u5e94\u8be5\u7528 `LRScheduler` \u800c\u4e0d\u662f `_LRScheduler` \u5462\uff1f**\r\n\r\n\r\n\u6ce8\uff1abaichuan-2 \u4f9d\u8d56 torch 2.0+\uff0c\u4e0d\u80fd\u964d\u5230 2.0 \u4ee5\u4e0b\uff08\u7528 1.13 \u4f1a\u62a5 TypeError: sdp_kernel() got an unexpected keyword argument 'enable_mem_efficient'\uff09\r\n\r\n### Environment\r\n\r\n- colossalai 0.3.3\r\n- torch 2.0.1\r\n- baichuan-2 7b \n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from torch.optim.lr_scheduler import _LRScheduler\n\n\nclass _enable_get_lr_call:\n def __init__(self, o):\n self.o = o\n\n def __enter__(self):\n self.o._get_lr_called_within_step = True\n return self\n\n def __exit__(self, type, value, traceback):\n self.o._get_lr_called_within_step = False\n\n\nclass DelayerScheduler(_LRScheduler):\n \"\"\"Starts with a flat lr schedule until it reaches N epochs then applies\n the specific scheduler (For example: ReduceLROnPlateau)\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, delay_epochs, after_scheduler, last_epoch=-1):\n if delay_epochs < 0:\n raise ValueError(f\"delay_epochs must >= 0, got {delay_epochs}\")\n self.delay_epochs = delay_epochs\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.delay_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n self.finished = True\n with _enable_get_lr_call(self.after_scheduler):\n return self.after_scheduler.get_lr()\n\n return self.base_lrs\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.delay_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super(DelayerScheduler, self).step(epoch)\n\n\nclass WarmupScheduler(_LRScheduler):\n \"\"\"Starts with a linear warmup lr schedule until it reaches N epochs then applies\n the specific scheduler (For example: ReduceLROnPlateau).\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, warmup_epochs, after_scheduler, last_epoch=-1):\n self.warmup_epochs = int(warmup_epochs)\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.warmup_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n self.finished = True\n return self.after_scheduler.get_lr()\n\n return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.warmup_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super().step(epoch)\n\n\nclass WarmupDelayerScheduler(_LRScheduler):\n \"\"\"Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule\n until it reaches M epochs then applies the specific scheduler (For example: ReduceLROnPlateau).\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.\n delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, warmup_epochs, delay_epochs, after_scheduler, last_epoch=-1):\n if delay_epochs < 0:\n raise ValueError(f\"delay_epochs must >= 0, got {delay_epochs}\")\n if warmup_epochs < 0:\n raise ValueError(f\"warmup_epochs must >= 0, got {warmup_epochs}\")\n self.warmup_epochs = warmup_epochs\n self.delay_epochs = delay_epochs\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.warmup_epochs + self.delay_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n # reset lr to base_lr\n for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):\n group[\"lr\"] = base_lr\n self.finished = True\n with _enable_get_lr_call(self.after_scheduler):\n return self.after_scheduler.get_lr()\n elif self.last_epoch >= self.warmup_epochs:\n return self.base_lrs\n\n return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.warmup_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super().step(epoch)\n", "path": "colossalai/nn/lr_scheduler/delayed.py"}], "after_files": [{"content": "import torch\nfrom packaging.version import Version\n\nif Version(torch.__version__) >= Version(\"2.0.0\"):\n from torch.optim.lr_scheduler import LRScheduler as _LRScheduler\nelse:\n from torch.optim.lr_scheduler import _LRScheduler\n\n\nclass _enable_get_lr_call:\n def __init__(self, o):\n self.o = o\n\n def __enter__(self):\n self.o._get_lr_called_within_step = True\n return self\n\n def __exit__(self, type, value, traceback):\n self.o._get_lr_called_within_step = False\n\n\nclass DelayerScheduler(_LRScheduler):\n \"\"\"Starts with a flat lr schedule until it reaches N epochs then applies\n the specific scheduler (For example: ReduceLROnPlateau)\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, delay_epochs, after_scheduler, last_epoch=-1):\n if delay_epochs < 0:\n raise ValueError(f\"delay_epochs must >= 0, got {delay_epochs}\")\n self.delay_epochs = delay_epochs\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.delay_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n self.finished = True\n with _enable_get_lr_call(self.after_scheduler):\n return self.after_scheduler.get_lr()\n\n return self.base_lrs\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.delay_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super(DelayerScheduler, self).step(epoch)\n\n\nclass WarmupScheduler(_LRScheduler):\n \"\"\"Starts with a linear warmup lr schedule until it reaches N epochs then applies\n the specific scheduler (For example: ReduceLROnPlateau).\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, warmup_epochs, after_scheduler, last_epoch=-1):\n self.warmup_epochs = int(warmup_epochs)\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.warmup_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n self.finished = True\n return self.after_scheduler.get_lr()\n\n return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.warmup_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super().step(epoch)\n\n\nclass WarmupDelayerScheduler(_LRScheduler):\n \"\"\"Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule\n until it reaches M epochs then applies the specific scheduler (For example: ReduceLROnPlateau).\n\n Args:\n optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.\n warmup_epochs (int): Number of epochs to linearly warmup lr until starting applying the scheduler.\n delay_epochs (int): Number of epochs to keep the initial lr until starting applying the scheduler.\n after_scheduler (:class:`torch.optim.lr_scheduler`): After target_epoch, use this scheduler.\n last_epoch (int, optional): The index of last epoch, defaults to -1. 
When last_epoch=-1,\n the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr.\n \"\"\"\n\n def __init__(self, optimizer, warmup_epochs, delay_epochs, after_scheduler, last_epoch=-1):\n if delay_epochs < 0:\n raise ValueError(f\"delay_epochs must >= 0, got {delay_epochs}\")\n if warmup_epochs < 0:\n raise ValueError(f\"warmup_epochs must >= 0, got {warmup_epochs}\")\n self.warmup_epochs = warmup_epochs\n self.delay_epochs = delay_epochs\n self.after_scheduler = after_scheduler\n self.finished = False\n super().__init__(optimizer, last_epoch)\n\n def state_dict(self):\n state_dict = {key: value for key, value in self.__dict__.items() if key not in \"optimizer\"}\n if isinstance(state_dict[\"after_scheduler\"], _LRScheduler):\n state_dict[\"after_scheduler_type\"] = type(state_dict[\"after_scheduler\"]).__name__\n state_dict[\"after_scheduler_dict\"] = state_dict[\"after_scheduler\"].state_dict()\n del state_dict[\"after_scheduler\"]\n else:\n raise NotImplementedError()\n return state_dict\n\n def get_lr(self):\n if self.last_epoch >= self.warmup_epochs + self.delay_epochs:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n # reset lr to base_lr\n for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):\n group[\"lr\"] = base_lr\n self.finished = True\n with _enable_get_lr_call(self.after_scheduler):\n return self.after_scheduler.get_lr()\n elif self.last_epoch >= self.warmup_epochs:\n return self.base_lrs\n\n return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished:\n if epoch is None:\n self.after_scheduler.step(None)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n self.after_scheduler.step(epoch - self.warmup_epochs)\n self._last_lr = self.after_scheduler.get_last_lr()\n else:\n return super().step(epoch)\n", "path": "colossalai/nn/lr_scheduler/delayed.py"}]} | 3,606 | 155 |
gh_patches_debug_39412 | rasdani/github-patches | git_diff | encode__starlette-145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop `StaticFile` app.
We have `FileResponse` and `StaticFiles`.
I think that including the `StaticFile` ASGI app complicates things unnecessarily, and that we should probably remove it.
* Drop `StaticFile` app.
* Put runtime checks that file exists, and file is a regular file in `FileResponse`.
--- END ISSUE ---
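For the second bullet, the intent is roughly the check sketched below. This is only an illustration, not the actual patch: it uses the synchronous `os.stat` for brevity, whereas `FileResponse.__call__` would use `aiofiles.os.stat` the way `StaticFiles` already does in the file shown next, and the error messages are made up.

```python
import os
import stat

def validate_file(path: str) -> os.stat_result:
    # The path must exist...
    try:
        stat_result = os.stat(path)
    except FileNotFoundError:
        raise RuntimeError(f"File at path {path} does not exist.")
    # ...and must be a regular file rather than a directory, socket, etc.
    if not stat.S_ISREG(stat_result.st_mode):
        raise RuntimeError(f"File at path {path} is not a file.")
    return stat_result
```

Raising `RuntimeError` rather than quietly returning a 404 keeps misconfiguration loud, mirroring the existing directory check in `StaticFiles`.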
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/staticfiles.py`
Content:
```
1 import os
2 import stat
3
4 from aiofiles.os import stat as aio_stat
5
6 from starlette.responses import PlainTextResponse, FileResponse, Response
7 from starlette.types import Send, Receive, Scope, ASGIInstance
8
9
10 class StaticFile:
11 def __init__(self, *, path: str) -> None:
12 self.path = path
13
14 def __call__(self, scope: Scope) -> ASGIInstance:
15 assert scope["type"] == "http"
16 if scope["method"] not in ("GET", "HEAD"):
17 return PlainTextResponse("Method Not Allowed", status_code=405)
18 return _StaticFileResponder(scope, path=self.path)
19
20
21 class StaticFiles:
22 def __init__(self, *, directory: str) -> None:
23 self.directory = directory
24 self.config_checked = False
25
26 def __call__(self, scope: Scope) -> ASGIInstance:
27 assert scope["type"] == "http"
28 if scope["method"] not in ("GET", "HEAD"):
29 return PlainTextResponse("Method Not Allowed", status_code=405)
30 path = os.path.normpath(os.path.join(*scope["path"].split("/")))
31 if path.startswith(".."):
32 return PlainTextResponse("Not Found", status_code=404)
33 path = os.path.join(self.directory, path)
34 if self.config_checked:
35 check_directory = None
36 else:
37 check_directory = self.directory
38 self.config_checked = True
39 return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
40
41
42 class _StaticFileResponder:
43 def __init__(self, scope: Scope, path: str) -> None:
44 self.scope = scope
45 self.path = path
46
47 async def __call__(self, receive: Receive, send: Send) -> None:
48 try:
49 stat_result = await aio_stat(self.path)
50 except FileNotFoundError:
51 raise RuntimeError("StaticFile at path '%s' does not exist." % self.path)
52 else:
53 mode = stat_result.st_mode
54 if not stat.S_ISREG(mode):
55 raise RuntimeError("StaticFile at path '%s' is not a file." % self.path)
56
57 response = FileResponse(self.path, stat_result=stat_result)
58 await response(receive, send)
59
60
61 class _StaticFilesResponder:
62 def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
63 self.scope = scope
64 self.path = path
65 self.check_directory = check_directory
66
67 async def check_directory_configured_correctly(self) -> None:
68 """
69 Perform a one-off configuration check that StaticFiles is actually
70 pointed at a directory, so that we can raise loud errors rather than
71 just returning 404 responses.
72 """
73 directory = self.check_directory
74 try:
75 stat_result = await aio_stat(directory)
76 except FileNotFoundError:
77 raise RuntimeError("StaticFiles directory '%s' does not exist." % directory)
78 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
79 raise RuntimeError("StaticFiles path '%s' is not a directory." % directory)
80
81 async def __call__(self, receive: Receive, send: Send) -> None:
82 if self.check_directory is not None:
83 await self.check_directory_configured_correctly()
84
85 try:
86 stat_result = await aio_stat(self.path)
87 except FileNotFoundError:
88 response = PlainTextResponse("Not Found", status_code=404) # type: Response
89 else:
90 mode = stat_result.st_mode
91 if not stat.S_ISREG(mode):
92 response = PlainTextResponse("Not Found", status_code=404)
93 else:
94 response = FileResponse(self.path, stat_result=stat_result)
95
96 await response(receive, send)
97
```
Path: `starlette/responses.py`
Content:
```
1 import hashlib
2 import os
3 import typing
4 import json
5
6 from email.utils import formatdate
7 from mimetypes import guess_type
8 from starlette.background import BackgroundTask
9 from starlette.datastructures import MutableHeaders, URL
10 from starlette.types import Receive, Send
11 from urllib.parse import quote_plus
12 import http.cookies
13
14 try:
15 import aiofiles
16 from aiofiles.os import stat as aio_stat
17 except ImportError: # pragma: nocover
18 aiofiles = None # type: ignore
19 aio_stat = None # type: ignore
20
21 try:
22 import ujson
23 except ImportError: # pragma: nocover
24 ujson = None # type: ignore
25
26
27 class Response:
28 media_type = None
29 charset = "utf-8"
30
31 def __init__(
32 self,
33 content: typing.Any,
34 status_code: int = 200,
35 headers: dict = None,
36 media_type: str = None,
37 background: BackgroundTask = None,
38 ) -> None:
39 self.body = self.render(content)
40 self.status_code = status_code
41 if media_type is not None:
42 self.media_type = media_type
43 self.background = background
44 self.init_headers(headers)
45
46 def render(self, content: typing.Any) -> bytes:
47 if isinstance(content, bytes):
48 return content
49 return content.encode(self.charset)
50
51 def init_headers(self, headers: typing.Mapping[str, str] = None) -> None:
52 if headers is None:
53 raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]
54 populate_content_length = True
55 populate_content_type = True
56 else:
57 raw_headers = [
58 (k.lower().encode("latin-1"), v.encode("latin-1"))
59 for k, v in headers.items()
60 ]
61 keys = [h[0] for h in raw_headers]
62 populate_content_length = b"content-length" in keys
63 populate_content_type = b"content-type" in keys
64
65 body = getattr(self, "body", None)
66 if body is not None and populate_content_length:
67 content_length = str(len(body))
68 raw_headers.append((b"content-length", content_length.encode("latin-1")))
69
70 content_type = self.media_type
71 if content_type is not None and populate_content_type:
72 if content_type.startswith("text/"):
73 content_type += "; charset=" + self.charset
74 raw_headers.append((b"content-type", content_type.encode("latin-1")))
75
76 self.raw_headers = raw_headers
77
78 @property
79 def headers(self) -> MutableHeaders:
80 if not hasattr(self, "_headers"):
81 self._headers = MutableHeaders(raw=self.raw_headers)
82 return self._headers
83
84 def set_cookie(
85 self,
86 key: str,
87 value: str = "",
88 max_age: int = None,
89 expires: int = None,
90 path: str = "/",
91 domain: str = None,
92 secure: bool = False,
93 httponly: bool = False,
94 ) -> None:
95 cookie = http.cookies.SimpleCookie()
96 cookie[key] = value
97 if max_age is not None:
98 cookie[key]["max-age"] = max_age # type: ignore
99 if expires is not None:
100 cookie[key]["expires"] = expires # type: ignore
101 if path is not None:
102 cookie[key]["path"] = path
103 if domain is not None:
104 cookie[key]["domain"] = domain
105 if secure:
106 cookie[key]["secure"] = True # type: ignore
107 if httponly:
108 cookie[key]["httponly"] = True # type: ignore
109 cookie_val = cookie.output(header="")
110 self.raw_headers.append((b"set-cookie", cookie_val.encode("latin-1")))
111
112 def delete_cookie(self, key: str, path: str = "/", domain: str = None) -> None:
113 self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
114
115 async def __call__(self, receive: Receive, send: Send) -> None:
116 await send(
117 {
118 "type": "http.response.start",
119 "status": self.status_code,
120 "headers": self.raw_headers,
121 }
122 )
123 await send({"type": "http.response.body", "body": self.body})
124
125 if self.background is not None:
126 await self.background()
127
128
129 class HTMLResponse(Response):
130 media_type = "text/html"
131
132
133 class PlainTextResponse(Response):
134 media_type = "text/plain"
135
136
137 class JSONResponse(Response):
138 media_type = "application/json"
139
140 def render(self, content: typing.Any) -> bytes:
141 return json.dumps(
142 content,
143 ensure_ascii=False,
144 allow_nan=False,
145 indent=None,
146 separators=(",", ":"),
147 ).encode("utf-8")
148
149
150 class UJSONResponse(JSONResponse):
151 media_type = "application/json"
152
153 def render(self, content: typing.Any) -> bytes:
154 return ujson.dumps(content, ensure_ascii=False).encode("utf-8")
155
156
157 class RedirectResponse(Response):
158 def __init__(
159 self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None
160 ) -> None:
161 super().__init__(content=b"", status_code=status_code, headers=headers)
162 self.headers["location"] = quote_plus(str(url), safe=":/#?&=@[]!$&'()*+,;")
163
164
165 class StreamingResponse(Response):
166 def __init__(
167 self,
168 content: typing.Any,
169 status_code: int = 200,
170 headers: dict = None,
171 media_type: str = None,
172 ) -> None:
173 self.body_iterator = content
174 self.status_code = status_code
175 self.media_type = self.media_type if media_type is None else media_type
176 self.init_headers(headers)
177
178 async def __call__(self, receive: Receive, send: Send) -> None:
179 await send(
180 {
181 "type": "http.response.start",
182 "status": self.status_code,
183 "headers": self.raw_headers,
184 }
185 )
186 async for chunk in self.body_iterator:
187 if not isinstance(chunk, bytes):
188 chunk = chunk.encode(self.charset)
189 await send({"type": "http.response.body", "body": chunk, "more_body": True})
190 await send({"type": "http.response.body", "body": b"", "more_body": False})
191
192
193 class FileResponse(Response):
194 chunk_size = 4096
195
196 def __init__(
197 self,
198 path: str,
199 headers: dict = None,
200 media_type: str = None,
201 filename: str = None,
202 stat_result: os.stat_result = None,
203 ) -> None:
204 assert aiofiles is not None, "'aiofiles' must be installed to use FileResponse"
205 self.path = path
206 self.status_code = 200
207 self.filename = filename
208 if media_type is None:
209 media_type = guess_type(filename or path)[0] or "text/plain"
210 self.media_type = media_type
211 self.init_headers(headers)
212 if self.filename is not None:
213 content_disposition = 'attachment; filename="{}"'.format(self.filename)
214 self.headers.setdefault("content-disposition", content_disposition)
215 self.stat_result = stat_result
216 if stat_result is not None:
217 self.set_stat_headers(stat_result)
218
219 def set_stat_headers(self, stat_result: os.stat_result) -> None:
220 content_length = str(stat_result.st_size)
221 last_modified = formatdate(stat_result.st_mtime, usegmt=True)
222 etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size)
223 etag = hashlib.md5(etag_base.encode()).hexdigest()
224 self.headers.setdefault("content-length", content_length)
225 self.headers.setdefault("last-modified", last_modified)
226 self.headers.setdefault("etag", etag)
227
228 async def __call__(self, receive: Receive, send: Send) -> None:
229 if self.stat_result is None:
230 stat_result = await aio_stat(self.path)
231 self.set_stat_headers(stat_result)
232 await send(
233 {
234 "type": "http.response.start",
235 "status": self.status_code,
236 "headers": self.raw_headers,
237 }
238 )
239 async with aiofiles.open(self.path, mode="rb") as file:
240 more_body = True
241 while more_body:
242 chunk = await file.read(self.chunk_size)
243 more_body = len(chunk) == self.chunk_size
244 await send(
245 {
246 "type": "http.response.body",
247 "body": chunk,
248 "more_body": more_body,
249 }
250 )
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/responses.py b/starlette/responses.py
--- a/starlette/responses.py
+++ b/starlette/responses.py
@@ -1,15 +1,16 @@
-import hashlib
import os
-import typing
import json
-
+import stat
+import typing
+import hashlib
+import http.cookies
from email.utils import formatdate
from mimetypes import guess_type
+from urllib.parse import quote_plus
+
from starlette.background import BackgroundTask
from starlette.datastructures import MutableHeaders, URL
from starlette.types import Receive, Send
-from urllib.parse import quote_plus
-import http.cookies
try:
import aiofiles
@@ -227,8 +228,15 @@
async def __call__(self, receive: Receive, send: Send) -> None:
if self.stat_result is None:
- stat_result = await aio_stat(self.path)
- self.set_stat_headers(stat_result)
+ try:
+ stat_result = await aio_stat(self.path)
+ self.set_stat_headers(stat_result)
+ except FileNotFoundError:
+ raise RuntimeError(f"File at path {self.path} does not exist.")
+ else:
+ mode = stat_result.st_mode
+ if not stat.S_ISREG(mode):
+ raise RuntimeError(f"File at path {self.path} is not a file.")
await send(
{
"type": "http.response.start",
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -7,17 +7,6 @@
from starlette.types import Send, Receive, Scope, ASGIInstance
-class StaticFile:
- def __init__(self, *, path: str) -> None:
- self.path = path
-
- def __call__(self, scope: Scope) -> ASGIInstance:
- assert scope["type"] == "http"
- if scope["method"] not in ("GET", "HEAD"):
- return PlainTextResponse("Method Not Allowed", status_code=405)
- return _StaticFileResponder(scope, path=self.path)
-
-
class StaticFiles:
def __init__(self, *, directory: str) -> None:
self.directory = directory
@@ -39,25 +28,6 @@
return _StaticFilesResponder(scope, path=path, check_directory=check_directory)
-class _StaticFileResponder:
- def __init__(self, scope: Scope, path: str) -> None:
- self.scope = scope
- self.path = path
-
- async def __call__(self, receive: Receive, send: Send) -> None:
- try:
- stat_result = await aio_stat(self.path)
- except FileNotFoundError:
- raise RuntimeError("StaticFile at path '%s' does not exist." % self.path)
- else:
- mode = stat_result.st_mode
- if not stat.S_ISREG(mode):
- raise RuntimeError("StaticFile at path '%s' is not a file." % self.path)
-
- response = FileResponse(self.path, stat_result=stat_result)
- await response(receive, send)
-
-
class _StaticFilesResponder:
def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:
self.scope = scope
| {"golden_diff": "diff --git a/starlette/responses.py b/starlette/responses.py\n--- a/starlette/responses.py\n+++ b/starlette/responses.py\n@@ -1,15 +1,16 @@\n-import hashlib\n import os\n-import typing\n import json\n-\n+import stat\n+import typing\n+import hashlib\n+import http.cookies\n from email.utils import formatdate\n from mimetypes import guess_type\n+from urllib.parse import quote_plus\n+\n from starlette.background import BackgroundTask\n from starlette.datastructures import MutableHeaders, URL\n from starlette.types import Receive, Send\n-from urllib.parse import quote_plus\n-import http.cookies\n \n try:\n import aiofiles\n@@ -227,8 +228,15 @@\n \n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.stat_result is None:\n- stat_result = await aio_stat(self.path)\n- self.set_stat_headers(stat_result)\n+ try:\n+ stat_result = await aio_stat(self.path)\n+ self.set_stat_headers(stat_result)\n+ except FileNotFoundError:\n+ raise RuntimeError(f\"File at path {self.path} does not exist.\")\n+ else:\n+ mode = stat_result.st_mode\n+ if not stat.S_ISREG(mode):\n+ raise RuntimeError(f\"File at path {self.path} is not a file.\")\n await send(\n {\n \"type\": \"http.response.start\",\ndiff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -7,17 +7,6 @@\n from starlette.types import Send, Receive, Scope, ASGIInstance\n \n \n-class StaticFile:\n- def __init__(self, *, path: str) -> None:\n- self.path = path\n-\n- def __call__(self, scope: Scope) -> ASGIInstance:\n- assert scope[\"type\"] == \"http\"\n- if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n- return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n- return _StaticFileResponder(scope, path=self.path)\n-\n-\n class StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n@@ -39,25 +28,6 @@\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n \n \n-class _StaticFileResponder:\n- def __init__(self, scope: Scope, path: str) -> None:\n- self.scope = scope\n- self.path = path\n-\n- async def __call__(self, receive: Receive, send: Send) -> None:\n- try:\n- stat_result = await aio_stat(self.path)\n- except FileNotFoundError:\n- raise RuntimeError(\"StaticFile at path '%s' does not exist.\" % self.path)\n- else:\n- mode = stat_result.st_mode\n- if not stat.S_ISREG(mode):\n- raise RuntimeError(\"StaticFile at path '%s' is not a file.\" % self.path)\n-\n- response = FileResponse(self.path, stat_result=stat_result)\n- await response(receive, send)\n-\n-\n class _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n", "issue": "Drop `StaticFile` app.\nWe have `FileResponse` and `StaticFiles`.\r\n\r\nI think that including the `StaticFile` ASGI app complicates things unnecessarily, and that we should probably remove it.\r\n\r\n* Drop `StaticFile` app.\r\n* Put runtime checks that file exists, and file is a regular file in `FileResponse`.\n", "before_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import PlainTextResponse, FileResponse, Response\nfrom starlette.types import Send, Receive, Scope, ASGIInstance\n\n\nclass StaticFile:\n def __init__(self, *, path: str) -> None:\n self.path = path\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", 
\"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n return _StaticFileResponder(scope, path=self.path)\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFileResponder:\n def __init__(self, scope: Scope, path: str) -> None:\n self.scope = scope\n self.path = path\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFile at path '%s' does not exist.\" % self.path)\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n raise RuntimeError(\"StaticFile at path '%s' is not a file.\" % self.path)\n\n response = FileResponse(self.path, stat_result=stat_result)\n await response(receive, send)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}, {"content": "import hashlib\nimport os\nimport typing\nimport json\n\nfrom email.utils import formatdate\nfrom mimetypes import guess_type\nfrom starlette.background import BackgroundTask\nfrom starlette.datastructures import MutableHeaders, URL\nfrom starlette.types import Receive, Send\nfrom urllib.parse import quote_plus\nimport http.cookies\n\ntry:\n import aiofiles\n from aiofiles.os import stat as aio_stat\nexcept ImportError: # pragma: nocover\n aiofiles = None # type: ignore\n aio_stat = None # type: ignore\n\ntry:\n import ujson\nexcept ImportError: # pragma: nocover\n ujson = None # type: ignore\n\n\nclass Response:\n media_type = None\n charset = 
\"utf-8\"\n\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> None:\n self.body = self.render(content)\n self.status_code = status_code\n if media_type is not None:\n self.media_type = media_type\n self.background = background\n self.init_headers(headers)\n\n def render(self, content: typing.Any) -> bytes:\n if isinstance(content, bytes):\n return content\n return content.encode(self.charset)\n\n def init_headers(self, headers: typing.Mapping[str, str] = None) -> None:\n if headers is None:\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n populate_content_length = True\n populate_content_type = True\n else:\n raw_headers = [\n (k.lower().encode(\"latin-1\"), v.encode(\"latin-1\"))\n for k, v in headers.items()\n ]\n keys = [h[0] for h in raw_headers]\n populate_content_length = b\"content-length\" in keys\n populate_content_type = b\"content-type\" in keys\n\n body = getattr(self, \"body\", None)\n if body is not None and populate_content_length:\n content_length = str(len(body))\n raw_headers.append((b\"content-length\", content_length.encode(\"latin-1\")))\n\n content_type = self.media_type\n if content_type is not None and populate_content_type:\n if content_type.startswith(\"text/\"):\n content_type += \"; charset=\" + self.charset\n raw_headers.append((b\"content-type\", content_type.encode(\"latin-1\")))\n\n self.raw_headers = raw_headers\n\n @property\n def headers(self) -> MutableHeaders:\n if not hasattr(self, \"_headers\"):\n self._headers = MutableHeaders(raw=self.raw_headers)\n return self._headers\n\n def set_cookie(\n self,\n key: str,\n value: str = \"\",\n max_age: int = None,\n expires: int = None,\n path: str = \"/\",\n domain: str = None,\n secure: bool = False,\n httponly: bool = False,\n ) -> None:\n cookie = http.cookies.SimpleCookie()\n cookie[key] = value\n if max_age is not None:\n cookie[key][\"max-age\"] = max_age # type: ignore\n if expires is not None:\n cookie[key][\"expires\"] = expires # type: ignore\n if path is not None:\n cookie[key][\"path\"] = path\n if domain is not None:\n cookie[key][\"domain\"] = domain\n if secure:\n cookie[key][\"secure\"] = True # type: ignore\n if httponly:\n cookie[key][\"httponly\"] = True # type: ignore\n cookie_val = cookie.output(header=\"\")\n self.raw_headers.append((b\"set-cookie\", cookie_val.encode(\"latin-1\")))\n\n def delete_cookie(self, key: str, path: str = \"/\", domain: str = None) -> None:\n self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": self.body})\n\n if self.background is not None:\n await self.background()\n\n\nclass HTMLResponse(Response):\n media_type = \"text/html\"\n\n\nclass PlainTextResponse(Response):\n media_type = \"text/plain\"\n\n\nclass JSONResponse(Response):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return json.dumps(\n content,\n ensure_ascii=False,\n allow_nan=False,\n indent=None,\n separators=(\",\", \":\"),\n ).encode(\"utf-8\")\n\n\nclass UJSONResponse(JSONResponse):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return ujson.dumps(content, ensure_ascii=False).encode(\"utf-8\")\n\n\nclass 
RedirectResponse(Response):\n def __init__(\n self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None\n ) -> None:\n super().__init__(content=b\"\", status_code=status_code, headers=headers)\n self.headers[\"location\"] = quote_plus(str(url), safe=\":/#?&=@[]!$&'()*+,;\")\n\n\nclass StreamingResponse(Response):\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n ) -> None:\n self.body_iterator = content\n self.status_code = status_code\n self.media_type = self.media_type if media_type is None else media_type\n self.init_headers(headers)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async for chunk in self.body_iterator:\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(self.charset)\n await send({\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True})\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n\nclass FileResponse(Response):\n chunk_size = 4096\n\n def __init__(\n self,\n path: str,\n headers: dict = None,\n media_type: str = None,\n filename: str = None,\n stat_result: os.stat_result = None,\n ) -> None:\n assert aiofiles is not None, \"'aiofiles' must be installed to use FileResponse\"\n self.path = path\n self.status_code = 200\n self.filename = filename\n if media_type is None:\n media_type = guess_type(filename or path)[0] or \"text/plain\"\n self.media_type = media_type\n self.init_headers(headers)\n if self.filename is not None:\n content_disposition = 'attachment; filename=\"{}\"'.format(self.filename)\n self.headers.setdefault(\"content-disposition\", content_disposition)\n self.stat_result = stat_result\n if stat_result is not None:\n self.set_stat_headers(stat_result)\n\n def set_stat_headers(self, stat_result: os.stat_result) -> None:\n content_length = str(stat_result.st_size)\n last_modified = formatdate(stat_result.st_mtime, usegmt=True)\n etag_base = str(stat_result.st_mtime) + \"-\" + str(stat_result.st_size)\n etag = hashlib.md5(etag_base.encode()).hexdigest()\n self.headers.setdefault(\"content-length\", content_length)\n self.headers.setdefault(\"last-modified\", last_modified)\n self.headers.setdefault(\"etag\", etag)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.stat_result is None:\n stat_result = await aio_stat(self.path)\n self.set_stat_headers(stat_result)\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async with aiofiles.open(self.path, mode=\"rb\") as file:\n more_body = True\n while more_body:\n chunk = await file.read(self.chunk_size)\n more_body = len(chunk) == self.chunk_size\n await send(\n {\n \"type\": \"http.response.body\",\n \"body\": chunk,\n \"more_body\": more_body,\n }\n )\n", "path": "starlette/responses.py"}], "after_files": [{"content": "import os\nimport stat\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.responses import PlainTextResponse, FileResponse, Response\nfrom starlette.types import Send, Receive, Scope, ASGIInstance\n\n\nclass StaticFiles:\n def __init__(self, *, directory: str) -> None:\n self.directory = directory\n self.config_checked = False\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n assert scope[\"type\"] == \"http\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n 
return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n path = os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n if path.startswith(\"..\"):\n return PlainTextResponse(\"Not Found\", status_code=404)\n path = os.path.join(self.directory, path)\n if self.config_checked:\n check_directory = None\n else:\n check_directory = self.directory\n self.config_checked = True\n return _StaticFilesResponder(scope, path=path, check_directory=check_directory)\n\n\nclass _StaticFilesResponder:\n def __init__(self, scope: Scope, path: str, check_directory: str = None) -> None:\n self.scope = scope\n self.path = path\n self.check_directory = check_directory\n\n async def check_directory_configured_correctly(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n directory = self.check_directory\n try:\n stat_result = await aio_stat(directory)\n except FileNotFoundError:\n raise RuntimeError(\"StaticFiles directory '%s' does not exist.\" % directory)\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\"StaticFiles path '%s' is not a directory.\" % directory)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.check_directory is not None:\n await self.check_directory_configured_correctly()\n\n try:\n stat_result = await aio_stat(self.path)\n except FileNotFoundError:\n response = PlainTextResponse(\"Not Found\", status_code=404) # type: Response\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n response = PlainTextResponse(\"Not Found\", status_code=404)\n else:\n response = FileResponse(self.path, stat_result=stat_result)\n\n await response(receive, send)\n", "path": "starlette/staticfiles.py"}, {"content": "import os\nimport json\nimport stat\nimport typing\nimport hashlib\nimport http.cookies\nfrom email.utils import formatdate\nfrom mimetypes import guess_type\nfrom urllib.parse import quote_plus\n\nfrom starlette.background import BackgroundTask\nfrom starlette.datastructures import MutableHeaders, URL\nfrom starlette.types import Receive, Send\n\ntry:\n import aiofiles\n from aiofiles.os import stat as aio_stat\nexcept ImportError: # pragma: nocover\n aiofiles = None # type: ignore\n aio_stat = None # type: ignore\n\ntry:\n import ujson\nexcept ImportError: # pragma: nocover\n ujson = None # type: ignore\n\n\nclass Response:\n media_type = None\n charset = \"utf-8\"\n\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> None:\n self.body = self.render(content)\n self.status_code = status_code\n if media_type is not None:\n self.media_type = media_type\n self.background = background\n self.init_headers(headers)\n\n def render(self, content: typing.Any) -> bytes:\n if isinstance(content, bytes):\n return content\n return content.encode(self.charset)\n\n def init_headers(self, headers: typing.Mapping[str, str] = None) -> None:\n if headers is None:\n raw_headers = [] # type: typing.List[typing.Tuple[bytes, bytes]]\n populate_content_length = True\n populate_content_type = True\n else:\n raw_headers = [\n (k.lower().encode(\"latin-1\"), v.encode(\"latin-1\"))\n for k, v in headers.items()\n ]\n keys = [h[0] for h in raw_headers]\n populate_content_length = b\"content-length\" in keys\n populate_content_type = b\"content-type\" 
in keys\n\n body = getattr(self, \"body\", None)\n if body is not None and populate_content_length:\n content_length = str(len(body))\n raw_headers.append((b\"content-length\", content_length.encode(\"latin-1\")))\n\n content_type = self.media_type\n if content_type is not None and populate_content_type:\n if content_type.startswith(\"text/\"):\n content_type += \"; charset=\" + self.charset\n raw_headers.append((b\"content-type\", content_type.encode(\"latin-1\")))\n\n self.raw_headers = raw_headers\n\n @property\n def headers(self) -> MutableHeaders:\n if not hasattr(self, \"_headers\"):\n self._headers = MutableHeaders(raw=self.raw_headers)\n return self._headers\n\n def set_cookie(\n self,\n key: str,\n value: str = \"\",\n max_age: int = None,\n expires: int = None,\n path: str = \"/\",\n domain: str = None,\n secure: bool = False,\n httponly: bool = False,\n ) -> None:\n cookie = http.cookies.SimpleCookie()\n cookie[key] = value\n if max_age is not None:\n cookie[key][\"max-age\"] = max_age # type: ignore\n if expires is not None:\n cookie[key][\"expires\"] = expires # type: ignore\n if path is not None:\n cookie[key][\"path\"] = path\n if domain is not None:\n cookie[key][\"domain\"] = domain\n if secure:\n cookie[key][\"secure\"] = True # type: ignore\n if httponly:\n cookie[key][\"httponly\"] = True # type: ignore\n cookie_val = cookie.output(header=\"\")\n self.raw_headers.append((b\"set-cookie\", cookie_val.encode(\"latin-1\")))\n\n def delete_cookie(self, key: str, path: str = \"/\", domain: str = None) -> None:\n self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": self.body})\n\n if self.background is not None:\n await self.background()\n\n\nclass HTMLResponse(Response):\n media_type = \"text/html\"\n\n\nclass PlainTextResponse(Response):\n media_type = \"text/plain\"\n\n\nclass JSONResponse(Response):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return json.dumps(\n content,\n ensure_ascii=False,\n allow_nan=False,\n indent=None,\n separators=(\",\", \":\"),\n ).encode(\"utf-8\")\n\n\nclass UJSONResponse(JSONResponse):\n media_type = \"application/json\"\n\n def render(self, content: typing.Any) -> bytes:\n return ujson.dumps(content, ensure_ascii=False).encode(\"utf-8\")\n\n\nclass RedirectResponse(Response):\n def __init__(\n self, url: typing.Union[str, URL], status_code: int = 302, headers: dict = None\n ) -> None:\n super().__init__(content=b\"\", status_code=status_code, headers=headers)\n self.headers[\"location\"] = quote_plus(str(url), safe=\":/#?&=@[]!$&'()*+,;\")\n\n\nclass StreamingResponse(Response):\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n ) -> None:\n self.body_iterator = content\n self.status_code = status_code\n self.media_type = self.media_type if media_type is None else media_type\n self.init_headers(headers)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async for chunk in self.body_iterator:\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(self.charset)\n await send({\"type\": \"http.response.body\", 
\"body\": chunk, \"more_body\": True})\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n\nclass FileResponse(Response):\n chunk_size = 4096\n\n def __init__(\n self,\n path: str,\n headers: dict = None,\n media_type: str = None,\n filename: str = None,\n stat_result: os.stat_result = None,\n ) -> None:\n assert aiofiles is not None, \"'aiofiles' must be installed to use FileResponse\"\n self.path = path\n self.status_code = 200\n self.filename = filename\n if media_type is None:\n media_type = guess_type(filename or path)[0] or \"text/plain\"\n self.media_type = media_type\n self.init_headers(headers)\n if self.filename is not None:\n content_disposition = 'attachment; filename=\"{}\"'.format(self.filename)\n self.headers.setdefault(\"content-disposition\", content_disposition)\n self.stat_result = stat_result\n if stat_result is not None:\n self.set_stat_headers(stat_result)\n\n def set_stat_headers(self, stat_result: os.stat_result) -> None:\n content_length = str(stat_result.st_size)\n last_modified = formatdate(stat_result.st_mtime, usegmt=True)\n etag_base = str(stat_result.st_mtime) + \"-\" + str(stat_result.st_size)\n etag = hashlib.md5(etag_base.encode()).hexdigest()\n self.headers.setdefault(\"content-length\", content_length)\n self.headers.setdefault(\"last-modified\", last_modified)\n self.headers.setdefault(\"etag\", etag)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.stat_result is None:\n try:\n stat_result = await aio_stat(self.path)\n self.set_stat_headers(stat_result)\n except FileNotFoundError:\n raise RuntimeError(f\"File at path {self.path} does not exist.\")\n else:\n mode = stat_result.st_mode\n if not stat.S_ISREG(mode):\n raise RuntimeError(f\"File at path {self.path} is not a file.\")\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async with aiofiles.open(self.path, mode=\"rb\") as file:\n more_body = True\n while more_body:\n chunk = await file.read(self.chunk_size)\n more_body = len(chunk) == self.chunk_size\n await send(\n {\n \"type\": \"http.response.body\",\n \"body\": chunk,\n \"more_body\": more_body,\n }\n )\n", "path": "starlette/responses.py"}]} | 3,884 | 735 |
gh_patches_debug_35227 | rasdani/github-patches | git_diff | openshift__openshift-ansible-3055 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
openshift_image_tag=latest broken again
https://github.com/openshift/openshift-ansible/pull/2882 allowed for `openshift_image_tag=latest`.
I think it was broken shortly thereafter with https://github.com/openshift/openshift-ansible/pull/2855, afaict.
```
deployment_type=origin
openshift_image_tag=latest
```
```
TASK [openshift_master_facts : set_fact] ***************************************
fatal: [origin-master.local.variantweb.net]: FAILED! => {"failed": true, "msg": "Unknown short_version atest"}
```
Looks like a code path is assuming the first character of the image tag is 'v' and removing it.
--- END ISSUE ---
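The "atest" in the error output fits that reading: unconditionally dropping an assumed leading 'v' is harmless for tags like `v1.4.0` but mangles `latest`. A toy illustration of the suspected pattern (the function name is invented, not code from this repository):

```python
def strip_assumed_v_prefix(image_tag):
    # Suspected pattern: drop the first character, assuming tags
    # always look like "v1.4.0".
    return image_tag[1:]

print(strip_assumed_v_prefix("v1.4.0"))  # "1.4.0" - fine for version-like tags
print(strip_assumed_v_prefix("latest"))  # "atest" - matches the failure above
```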
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py`
Content:
```
1 # pylint: disable=missing-docstring
2
3 import re
4 from ansible.errors import AnsibleError
5 from ansible.plugins.lookup import LookupBase
6
7
8 class LookupModule(LookupBase):
9 # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
10
11 def run(self, terms, variables=None, zones_enabled=True, short_version=None,
12 deployment_type=None, **kwargs):
13
14 priorities = [
15 {'name': 'LeastRequestedPriority', 'weight': 1},
16 {'name': 'BalancedResourceAllocation', 'weight': 1},
17 {'name': 'SelectorSpreadPriority', 'weight': 1}
18 ]
19
20 if short_version is None or deployment_type is None:
21 if 'openshift' not in variables:
22 raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
23
24 if deployment_type is None:
25 if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
26 raise AnsibleError("This lookup module requires that the deployment_type be set")
27
28 deployment_type = variables['openshift']['common']['deployment_type']
29
30 if short_version is None:
31 if 'short_version' in variables['openshift']['common']:
32 short_version = variables['openshift']['common']['short_version']
33 elif 'openshift_release' in variables:
34 release = variables['openshift_release']
35 if release.startswith('v'):
36 short_version = release[1:]
37 else:
38 short_version = release
39 short_version = '.'.join(short_version.split('.')[0:2])
40 elif 'openshift_version' in variables:
41 version = variables['openshift_version']
42 short_version = '.'.join(version.split('.')[0:2])
43 else:
44 # pylint: disable=line-too-long
45 raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
46
47 if deployment_type == 'origin':
48 if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
49 raise AnsibleError("Unknown short_version %s" % short_version)
50 elif deployment_type == 'openshift-enterprise':
51 if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
52 raise AnsibleError("Unknown short_version %s" % short_version)
53 else:
54 raise AnsibleError("Unknown deployment_type %s" % deployment_type)
55
56 if deployment_type == 'openshift-enterprise':
57 # convert short_version to origin short_version
58 short_version = re.sub('^3.', '1.', short_version)
59
60 if short_version == '1.4':
61 priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
62
63 # only 1.1 didn't include NodeAffinityPriority
64 if short_version != '1.1':
65 priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
66
67 if short_version not in ['1.1', '1.2']:
68 priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
69
70 if short_version not in ['1.1', '1.2', '1.3']:
71 priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
72
73 if zones_enabled:
74 zone_priority = {
75 'name': 'Zone',
76 'argument': {
77 'serviceAntiAffinity': {
78 'label': 'zone'
79 }
80 },
81 'weight': 2
82 }
83 priorities.append(zone_priority)
84
85 return priorities
86
```
Path: `roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py`
Content:
```
1 # pylint: disable=missing-docstring
2
3 import re
4 from ansible.errors import AnsibleError
5 from ansible.plugins.lookup import LookupBase
6
7
8 class LookupModule(LookupBase):
9 # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
10
11 def run(self, terms, variables=None, regions_enabled=True, short_version=None,
12 deployment_type=None, **kwargs):
13
14 predicates = []
15
16 if short_version is None or deployment_type is None:
17 if 'openshift' not in variables:
18 raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
19
20 if deployment_type is None:
21 if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
22 raise AnsibleError("This lookup module requires that the deployment_type be set")
23
24 deployment_type = variables['openshift']['common']['deployment_type']
25
26 if short_version is None:
27 if 'short_version' in variables['openshift']['common']:
28 short_version = variables['openshift']['common']['short_version']
29 elif 'openshift_release' in variables:
30 release = variables['openshift_release']
31 if release.startswith('v'):
32 short_version = release[1:]
33 else:
34 short_version = release
35 short_version = '.'.join(short_version.split('.')[0:2])
36 elif 'openshift_version' in variables:
37 version = variables['openshift_version']
38 short_version = '.'.join(version.split('.')[0:2])
39 else:
40 # pylint: disable=line-too-long
41 raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
42 if deployment_type == 'origin':
43 if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
44 raise AnsibleError("Unknown short_version %s" % short_version)
45 elif deployment_type == 'openshift-enterprise':
46 if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
47 raise AnsibleError("Unknown short_version %s" % short_version)
48 else:
49 raise AnsibleError("Unknown deployment_type %s" % deployment_type)
50
51 if deployment_type == 'openshift-enterprise':
52 # convert short_version to origin short_version
53 short_version = re.sub('^3.', '1.', short_version)
54
55 if short_version in ['1.1', '1.2']:
56 predicates.append({'name': 'PodFitsHostPorts'})
57 predicates.append({'name': 'PodFitsResources'})
58
59 # applies to all known versions
60 predicates.append({'name': 'NoDiskConflict'})
61
62 # only 1.1 didn't include NoVolumeZoneConflict
63 if short_version != '1.1':
64 predicates.append({'name': 'NoVolumeZoneConflict'})
65
66 if short_version in ['1.1', '1.2']:
67 predicates.append({'name': 'MatchNodeSelector'})
68
69 if short_version != '1.1':
70 predicates.append({'name': 'MaxEBSVolumeCount'})
71 predicates.append({'name': 'MaxGCEPDVolumeCount'})
72
73 if short_version not in ['1.1', '1.2']:
74 predicates.append({'name': 'GeneralPredicates'})
75 predicates.append({'name': 'PodToleratesNodeTaints'})
76 predicates.append({'name': 'CheckNodeMemoryPressure'})
77
78 if short_version not in ['1.1', '1.2', '1.3']:
79 predicates.append({'name': 'CheckNodeDiskPressure'})
80 predicates.append({'name': 'MatchInterPodAffinity'})
81
82 if regions_enabled:
83 region_predicate = {
84 'name': 'Region',
85 'argument': {
86 'serviceAffinity': {
87 'labels': ['region']
88 }
89 }
90 }
91 predicates.append(region_predicate)
92
93 return predicates
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,10 +40,10 @@
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -45,10 +45,10 @@
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
| {"golden_diff": "diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py\n--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py\n+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py\n@@ -40,10 +40,10 @@\n # pylint: disable=line-too-long\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n if deployment_type == 'origin':\n- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:\n+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n- if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:\n+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\ndiff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py\n--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py\n+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py\n@@ -45,10 +45,10 @@\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n \n if deployment_type == 'origin':\n- if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:\n+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n- if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:\n+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\n", "issue": "openshift_image_tag=latest broken again\nhttps://github.com/openshift/openshift-ansible/pull/2882 allowed for `openshift_image_tag=latest`.\r\nI think is was broken shortly thereafter with https://github.com/openshift/openshift-ansible/pull/2855 afaict\r\n\r\n```\r\ndeployment_type=origin\r\nopenshift_image_tag=latest\r\n```\r\n\r\n```\r\nTASK [openshift_master_facts : set_fact] ***************************************\r\nfatal: [origin-master.local.variantweb.net]: FAILED! 
=> {\"failed\": true, \"msg\": \"Unknown short_version atest\"}\r\n```\r\n\r\nLooks like a code path is assuming the first character of the image tag is 'v' and removing it.\n", "before_files": [{"content": "# pylint: disable=missing-docstring\n\nimport re\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n # pylint: disable=too-many-branches,too-many-statements,too-many-arguments\n\n def run(self, terms, variables=None, zones_enabled=True, short_version=None,\n deployment_type=None, **kwargs):\n\n priorities = [\n {'name': 'LeastRequestedPriority', 'weight': 1},\n {'name': 'BalancedResourceAllocation', 'weight': 1},\n {'name': 'SelectorSpreadPriority', 'weight': 1}\n ]\n\n if short_version is None or deployment_type is None:\n if 'openshift' not in variables:\n raise AnsibleError(\"This lookup module requires openshift_facts to be run prior to use\")\n\n if deployment_type is None:\n if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:\n raise AnsibleError(\"This lookup module requires that the deployment_type be set\")\n\n deployment_type = variables['openshift']['common']['deployment_type']\n\n if short_version is None:\n if 'short_version' in variables['openshift']['common']:\n short_version = variables['openshift']['common']['short_version']\n elif 'openshift_release' in variables:\n release = variables['openshift_release']\n if release.startswith('v'):\n short_version = release[1:]\n else:\n short_version = release\n short_version = '.'.join(short_version.split('.')[0:2])\n elif 'openshift_version' in variables:\n version = variables['openshift_version']\n short_version = '.'.join(version.split('.')[0:2])\n else:\n # pylint: disable=line-too-long\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n\n if deployment_type == 'origin':\n if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\n\n if deployment_type == 'openshift-enterprise':\n # convert short_version to origin short_version\n short_version = re.sub('^3.', '1.', short_version)\n\n if short_version == '1.4':\n priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})\n\n # only 1.1 didn't include NodeAffinityPriority\n if short_version != '1.1':\n priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})\n\n if short_version not in ['1.1', '1.2']:\n priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})\n\n if short_version not in ['1.1', '1.2', '1.3']:\n priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})\n\n if zones_enabled:\n zone_priority = {\n 'name': 'Zone',\n 'argument': {\n 'serviceAntiAffinity': {\n 'label': 'zone'\n }\n },\n 'weight': 2\n }\n priorities.append(zone_priority)\n\n return priorities\n", "path": "roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py"}, {"content": "# pylint: disable=missing-docstring\n\nimport re\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n # pylint: 
disable=too-many-branches,too-many-statements,too-many-arguments\n\n def run(self, terms, variables=None, regions_enabled=True, short_version=None,\n deployment_type=None, **kwargs):\n\n predicates = []\n\n if short_version is None or deployment_type is None:\n if 'openshift' not in variables:\n raise AnsibleError(\"This lookup module requires openshift_facts to be run prior to use\")\n\n if deployment_type is None:\n if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:\n raise AnsibleError(\"This lookup module requires that the deployment_type be set\")\n\n deployment_type = variables['openshift']['common']['deployment_type']\n\n if short_version is None:\n if 'short_version' in variables['openshift']['common']:\n short_version = variables['openshift']['common']['short_version']\n elif 'openshift_release' in variables:\n release = variables['openshift_release']\n if release.startswith('v'):\n short_version = release[1:]\n else:\n short_version = release\n short_version = '.'.join(short_version.split('.')[0:2])\n elif 'openshift_version' in variables:\n version = variables['openshift_version']\n short_version = '.'.join(version.split('.')[0:2])\n else:\n # pylint: disable=line-too-long\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n if deployment_type == 'origin':\n if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\n\n if deployment_type == 'openshift-enterprise':\n # convert short_version to origin short_version\n short_version = re.sub('^3.', '1.', short_version)\n\n if short_version in ['1.1', '1.2']:\n predicates.append({'name': 'PodFitsHostPorts'})\n predicates.append({'name': 'PodFitsResources'})\n\n # applies to all known versions\n predicates.append({'name': 'NoDiskConflict'})\n\n # only 1.1 didn't include NoVolumeZoneConflict\n if short_version != '1.1':\n predicates.append({'name': 'NoVolumeZoneConflict'})\n\n if short_version in ['1.1', '1.2']:\n predicates.append({'name': 'MatchNodeSelector'})\n\n if short_version != '1.1':\n predicates.append({'name': 'MaxEBSVolumeCount'})\n predicates.append({'name': 'MaxGCEPDVolumeCount'})\n\n if short_version not in ['1.1', '1.2']:\n predicates.append({'name': 'GeneralPredicates'})\n predicates.append({'name': 'PodToleratesNodeTaints'})\n predicates.append({'name': 'CheckNodeMemoryPressure'})\n\n if short_version not in ['1.1', '1.2', '1.3']:\n predicates.append({'name': 'CheckNodeDiskPressure'})\n predicates.append({'name': 'MatchInterPodAffinity'})\n\n if regions_enabled:\n region_predicate = {\n 'name': 'Region',\n 'argument': {\n 'serviceAffinity': {\n 'labels': ['region']\n }\n }\n }\n predicates.append(region_predicate)\n\n return predicates\n", "path": "roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py"}], "after_files": [{"content": "# pylint: disable=missing-docstring\n\nimport re\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n # pylint: disable=too-many-branches,too-many-statements,too-many-arguments\n\n def run(self, terms, variables=None, zones_enabled=True, 
short_version=None,\n deployment_type=None, **kwargs):\n\n priorities = [\n {'name': 'LeastRequestedPriority', 'weight': 1},\n {'name': 'BalancedResourceAllocation', 'weight': 1},\n {'name': 'SelectorSpreadPriority', 'weight': 1}\n ]\n\n if short_version is None or deployment_type is None:\n if 'openshift' not in variables:\n raise AnsibleError(\"This lookup module requires openshift_facts to be run prior to use\")\n\n if deployment_type is None:\n if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:\n raise AnsibleError(\"This lookup module requires that the deployment_type be set\")\n\n deployment_type = variables['openshift']['common']['deployment_type']\n\n if short_version is None:\n if 'short_version' in variables['openshift']['common']:\n short_version = variables['openshift']['common']['short_version']\n elif 'openshift_release' in variables:\n release = variables['openshift_release']\n if release.startswith('v'):\n short_version = release[1:]\n else:\n short_version = release\n short_version = '.'.join(short_version.split('.')[0:2])\n elif 'openshift_version' in variables:\n version = variables['openshift_version']\n short_version = '.'.join(version.split('.')[0:2])\n else:\n # pylint: disable=line-too-long\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n\n if deployment_type == 'origin':\n if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\n\n if deployment_type == 'openshift-enterprise':\n # convert short_version to origin short_version\n short_version = re.sub('^3.', '1.', short_version)\n\n if short_version == '1.4':\n priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})\n\n # only 1.1 didn't include NodeAffinityPriority\n if short_version != '1.1':\n priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})\n\n if short_version not in ['1.1', '1.2']:\n priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})\n\n if short_version not in ['1.1', '1.2', '1.3']:\n priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})\n\n if zones_enabled:\n zone_priority = {\n 'name': 'Zone',\n 'argument': {\n 'serviceAntiAffinity': {\n 'label': 'zone'\n }\n },\n 'weight': 2\n }\n priorities.append(zone_priority)\n\n return priorities\n", "path": "roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py"}, {"content": "# pylint: disable=missing-docstring\n\nimport re\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n # pylint: disable=too-many-branches,too-many-statements,too-many-arguments\n\n def run(self, terms, variables=None, regions_enabled=True, short_version=None,\n deployment_type=None, **kwargs):\n\n predicates = []\n\n if short_version is None or deployment_type is None:\n if 'openshift' not in variables:\n raise AnsibleError(\"This lookup module requires openshift_facts to be run prior to use\")\n\n if deployment_type is None:\n if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:\n raise 
AnsibleError(\"This lookup module requires that the deployment_type be set\")\n\n deployment_type = variables['openshift']['common']['deployment_type']\n\n if short_version is None:\n if 'short_version' in variables['openshift']['common']:\n short_version = variables['openshift']['common']['short_version']\n elif 'openshift_release' in variables:\n release = variables['openshift_release']\n if release.startswith('v'):\n short_version = release[1:]\n else:\n short_version = release\n short_version = '.'.join(short_version.split('.')[0:2])\n elif 'openshift_version' in variables:\n version = variables['openshift_version']\n short_version = '.'.join(version.split('.')[0:2])\n else:\n # pylint: disable=line-too-long\n raise AnsibleError(\"Either OpenShift needs to be installed or openshift_release needs to be specified\")\n if deployment_type == 'origin':\n if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n elif deployment_type == 'openshift-enterprise':\n if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:\n raise AnsibleError(\"Unknown short_version %s\" % short_version)\n else:\n raise AnsibleError(\"Unknown deployment_type %s\" % deployment_type)\n\n if deployment_type == 'openshift-enterprise':\n # convert short_version to origin short_version\n short_version = re.sub('^3.', '1.', short_version)\n\n if short_version in ['1.1', '1.2']:\n predicates.append({'name': 'PodFitsHostPorts'})\n predicates.append({'name': 'PodFitsResources'})\n\n # applies to all known versions\n predicates.append({'name': 'NoDiskConflict'})\n\n # only 1.1 didn't include NoVolumeZoneConflict\n if short_version != '1.1':\n predicates.append({'name': 'NoVolumeZoneConflict'})\n\n if short_version in ['1.1', '1.2']:\n predicates.append({'name': 'MatchNodeSelector'})\n\n if short_version != '1.1':\n predicates.append({'name': 'MaxEBSVolumeCount'})\n predicates.append({'name': 'MaxGCEPDVolumeCount'})\n\n if short_version not in ['1.1', '1.2']:\n predicates.append({'name': 'GeneralPredicates'})\n predicates.append({'name': 'PodToleratesNodeTaints'})\n predicates.append({'name': 'CheckNodeMemoryPressure'})\n\n if short_version not in ['1.1', '1.2', '1.3']:\n predicates.append({'name': 'CheckNodeDiskPressure'})\n predicates.append({'name': 'MatchInterPodAffinity'})\n\n if regions_enabled:\n region_predicate = {\n 'name': 'Region',\n 'argument': {\n 'serviceAffinity': {\n 'labels': ['region']\n }\n }\n }\n predicates.append(region_predicate)\n\n return predicates\n", "path": "roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py"}]} | 2,475 | 722 |
gh_patches_debug_13132 | rasdani/github-patches | git_diff | conan-io__conan-14185 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Can't call `conan upload --recipe-only` twice with backup sources enabled
### Steps to reproduce
1. Enable backup sources
2. Export a recipe that downloads a file
3. Call conan upload only recipe for ref
4. Do it again, it fails due to KeyError
Found while prepping for https://github.com/conan-io/conan-center-index/pull/18082
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/downloaders/download_cache.py`
Content:
```
1 import json
2 import os
3 from contextlib import contextmanager
4 from threading import Lock
5
6 from conans.util.dates import timestamp_now
7 from conans.util.files import load, save
8 from conans.util.locks import SimpleLock
9 from conans.util.sha import sha256 as compute_sha256
10
11
12 class DownloadCache:
13 """ The download cache has 3 folders
14 - "s": SOURCE_BACKUP for the files.download(internet_url) backup sources feature
15 - "c": CONAN_CACHE: for caching Conan packages artifacts
16 - "locks": The LOCKS folder containing the file locks for concurrent access to the cache
17 """
18 _LOCKS = "locks"
19 _SOURCE_BACKUP = "s"
20 _CONAN_CACHE = "c"
21
22 def __init__(self, path: str):
23 self._path: str = path
24
25 def source_path(self, sha256):
26 return os.path.join(self._path, self._SOURCE_BACKUP, sha256)
27
28 def cached_path(self, url):
29 h = compute_sha256(url.encode())
30 return os.path.join(self._path, self._CONAN_CACHE, h), h
31
32 _thread_locks = {} # Needs to be shared among all instances
33
34 @contextmanager
35 def lock(self, lock_id):
36 lock = os.path.join(self._path, self._LOCKS, lock_id)
37 with SimpleLock(lock):
38 # Once the process has access, make sure multithread is locked too
39 # as SimpleLock doesn't work multithread
40 thread_lock = self._thread_locks.setdefault(lock, Lock())
41 thread_lock.acquire()
42 try:
43 yield
44 finally:
45 thread_lock.release()
46
47 def get_backup_sources_files_to_upload(self, package_list, excluded_urls):
48 """ from a package_list of packages to upload, collect from the backup-sources cache
49 the matching references to upload those backups too
50 """
51 def should_upload_sources(package):
52 return any(prev["upload"] for prev in package["revisions"].values())
53
54 files_to_upload = []
55 path_backups = os.path.join(self._path, self._SOURCE_BACKUP)
56
57 if not os.path.exists(path_backups):
58 return []
59
60 if excluded_urls is None:
61 excluded_urls = []
62
63 all_refs = {str(k) for k, ref in package_list.refs()
64 if ref.get("upload") or any(should_upload_sources(p)
65 for p in ref["packages"].values())}
66 for f in os.listdir(path_backups):
67 if f.endswith(".json"):
68 f = os.path.join(path_backups, f)
69 content = json.loads(load(f))
70 refs = content["references"]
71 # unknown entries are not uploaded at this moment, the flow is not expected.
72 for ref, urls in refs.items():
73 is_excluded = all(any(url.startswith(excluded_url)
74 for excluded_url in excluded_urls)
75 for url in urls)
76 if not is_excluded and ref in all_refs:
77 files_to_upload.append(f)
78 files_to_upload.append(f[:-5])
79 break
80 return files_to_upload
81
82 @staticmethod
83 def update_backup_sources_json(cached_path, conanfile, urls):
84 """ create or update the sha256.json file with the references and new urls used
85 """
86 summary_path = cached_path + ".json"
87 if os.path.exists(summary_path):
88 summary = json.loads(load(summary_path))
89 else:
90 summary = {"references": {}, "timestamp": timestamp_now()}
91
92 try:
93 summary_key = str(conanfile.ref)
94 except AttributeError:
95 # The recipe path would be different between machines
96 # So best we can do is to set this as unknown
97 summary_key = "unknown"
98
99 if not isinstance(urls, (list, tuple)):
100 urls = [urls]
101 existing_urls = summary["references"].setdefault(summary_key, [])
102 existing_urls.extend(url for url in urls if url not in existing_urls)
103 conanfile.output.verbose(f"Updating ${summary_path} summary file")
104 summary_dump = json.dumps(summary)
105 conanfile.output.debug(f"New summary: ${summary_dump}")
106 save(summary_path, json.dumps(summary))
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/downloaders/download_cache.py b/conans/client/downloaders/download_cache.py
--- a/conans/client/downloaders/download_cache.py
+++ b/conans/client/downloaders/download_cache.py
@@ -60,9 +60,12 @@
if excluded_urls is None:
excluded_urls = []
- all_refs = {str(k) for k, ref in package_list.refs()
- if ref.get("upload") or any(should_upload_sources(p)
- for p in ref["packages"].values())}
+ all_refs = set()
+ for k, ref in package_list.refs():
+ packages = ref.get("packages", {}).values()
+ if ref.get("upload") or any(should_upload_sources(p) for p in packages):
+ all_refs.add(str(k))
+
for f in os.listdir(path_backups):
if f.endswith(".json"):
f = os.path.join(path_backups, f)
| {"golden_diff": "diff --git a/conans/client/downloaders/download_cache.py b/conans/client/downloaders/download_cache.py\n--- a/conans/client/downloaders/download_cache.py\n+++ b/conans/client/downloaders/download_cache.py\n@@ -60,9 +60,12 @@\n if excluded_urls is None:\n excluded_urls = []\n \n- all_refs = {str(k) for k, ref in package_list.refs()\n- if ref.get(\"upload\") or any(should_upload_sources(p)\n- for p in ref[\"packages\"].values())}\n+ all_refs = set()\n+ for k, ref in package_list.refs():\n+ packages = ref.get(\"packages\", {}).values()\n+ if ref.get(\"upload\") or any(should_upload_sources(p) for p in packages):\n+ all_refs.add(str(k))\n+\n for f in os.listdir(path_backups):\n if f.endswith(\".json\"):\n f = os.path.join(path_backups, f)\n", "issue": "[bug] Can't call `conan upload --recipe-only` twice with backup sources enabled\n### Steps to reproduce\r\n\r\n1. Enable backup sources\r\n2. Export a recipe that downloads file\r\n3. Call conan upload only recipe for ref\r\n4. Do it again, it fails due to KeyError\r\n\r\n\r\nFound while prepping for https://github.com/conan-io/conan-center-index/pull/18082\n", "before_files": [{"content": "import json\nimport os\nfrom contextlib import contextmanager\nfrom threading import Lock\n\nfrom conans.util.dates import timestamp_now\nfrom conans.util.files import load, save\nfrom conans.util.locks import SimpleLock\nfrom conans.util.sha import sha256 as compute_sha256\n\n\nclass DownloadCache:\n \"\"\" The download cache has 3 folders\n - \"s\": SOURCE_BACKUP for the files.download(internet_url) backup sources feature\n - \"c\": CONAN_CACHE: for caching Conan packages artifacts\n - \"locks\": The LOCKS folder containing the file locks for concurrent access to the cache\n \"\"\"\n _LOCKS = \"locks\"\n _SOURCE_BACKUP = \"s\"\n _CONAN_CACHE = \"c\"\n\n def __init__(self, path: str):\n self._path: str = path\n\n def source_path(self, sha256):\n return os.path.join(self._path, self._SOURCE_BACKUP, sha256)\n\n def cached_path(self, url):\n h = compute_sha256(url.encode())\n return os.path.join(self._path, self._CONAN_CACHE, h), h\n\n _thread_locks = {} # Needs to be shared among all instances\n\n @contextmanager\n def lock(self, lock_id):\n lock = os.path.join(self._path, self._LOCKS, lock_id)\n with SimpleLock(lock):\n # Once the process has access, make sure multithread is locked too\n # as SimpleLock doesn't work multithread\n thread_lock = self._thread_locks.setdefault(lock, Lock())\n thread_lock.acquire()\n try:\n yield\n finally:\n thread_lock.release()\n\n def get_backup_sources_files_to_upload(self, package_list, excluded_urls):\n \"\"\" from a package_list of packages to upload, collect from the backup-sources cache\n the matching references to upload those backups too\n \"\"\"\n def should_upload_sources(package):\n return any(prev[\"upload\"] for prev in package[\"revisions\"].values())\n\n files_to_upload = []\n path_backups = os.path.join(self._path, self._SOURCE_BACKUP)\n\n if not os.path.exists(path_backups):\n return []\n\n if excluded_urls is None:\n excluded_urls = []\n\n all_refs = {str(k) for k, ref in package_list.refs()\n if ref.get(\"upload\") or any(should_upload_sources(p)\n for p in ref[\"packages\"].values())}\n for f in os.listdir(path_backups):\n if f.endswith(\".json\"):\n f = os.path.join(path_backups, f)\n content = json.loads(load(f))\n refs = content[\"references\"]\n # unknown entries are not uploaded at this moment, the flow is not expected.\n for ref, urls in refs.items():\n is_excluded = 
all(any(url.startswith(excluded_url)\n for excluded_url in excluded_urls)\n for url in urls)\n if not is_excluded and ref in all_refs:\n files_to_upload.append(f)\n files_to_upload.append(f[:-5])\n break\n return files_to_upload\n\n @staticmethod\n def update_backup_sources_json(cached_path, conanfile, urls):\n \"\"\" create or update the sha256.json file with the references and new urls used\n \"\"\"\n summary_path = cached_path + \".json\"\n if os.path.exists(summary_path):\n summary = json.loads(load(summary_path))\n else:\n summary = {\"references\": {}, \"timestamp\": timestamp_now()}\n\n try:\n summary_key = str(conanfile.ref)\n except AttributeError:\n # The recipe path would be different between machines\n # So best we can do is to set this as unknown\n summary_key = \"unknown\"\n\n if not isinstance(urls, (list, tuple)):\n urls = [urls]\n existing_urls = summary[\"references\"].setdefault(summary_key, [])\n existing_urls.extend(url for url in urls if url not in existing_urls)\n conanfile.output.verbose(f\"Updating ${summary_path} summary file\")\n summary_dump = json.dumps(summary)\n conanfile.output.debug(f\"New summary: ${summary_dump}\")\n save(summary_path, json.dumps(summary))\n", "path": "conans/client/downloaders/download_cache.py"}], "after_files": [{"content": "import json\nimport os\nfrom contextlib import contextmanager\nfrom threading import Lock\n\nfrom conans.util.dates import timestamp_now\nfrom conans.util.files import load, save\nfrom conans.util.locks import SimpleLock\nfrom conans.util.sha import sha256 as compute_sha256\n\n\nclass DownloadCache:\n \"\"\" The download cache has 3 folders\n - \"s\": SOURCE_BACKUP for the files.download(internet_url) backup sources feature\n - \"c\": CONAN_CACHE: for caching Conan packages artifacts\n - \"locks\": The LOCKS folder containing the file locks for concurrent access to the cache\n \"\"\"\n _LOCKS = \"locks\"\n _SOURCE_BACKUP = \"s\"\n _CONAN_CACHE = \"c\"\n\n def __init__(self, path: str):\n self._path: str = path\n\n def source_path(self, sha256):\n return os.path.join(self._path, self._SOURCE_BACKUP, sha256)\n\n def cached_path(self, url):\n h = compute_sha256(url.encode())\n return os.path.join(self._path, self._CONAN_CACHE, h), h\n\n _thread_locks = {} # Needs to be shared among all instances\n\n @contextmanager\n def lock(self, lock_id):\n lock = os.path.join(self._path, self._LOCKS, lock_id)\n with SimpleLock(lock):\n # Once the process has access, make sure multithread is locked too\n # as SimpleLock doesn't work multithread\n thread_lock = self._thread_locks.setdefault(lock, Lock())\n thread_lock.acquire()\n try:\n yield\n finally:\n thread_lock.release()\n\n def get_backup_sources_files_to_upload(self, package_list, excluded_urls):\n \"\"\" from a package_list of packages to upload, collect from the backup-sources cache\n the matching references to upload those backups too\n \"\"\"\n def should_upload_sources(package):\n return any(prev[\"upload\"] for prev in package[\"revisions\"].values())\n\n files_to_upload = []\n path_backups = os.path.join(self._path, self._SOURCE_BACKUP)\n\n if not os.path.exists(path_backups):\n return []\n\n if excluded_urls is None:\n excluded_urls = []\n\n all_refs = set()\n for k, ref in package_list.refs():\n packages = ref.get(\"packages\", {}).values()\n if ref.get(\"upload\") or any(should_upload_sources(p) for p in packages):\n all_refs.add(str(k))\n\n for f in os.listdir(path_backups):\n if f.endswith(\".json\"):\n f = os.path.join(path_backups, f)\n content = 
json.loads(load(f))\n refs = content[\"references\"]\n # unknown entries are not uploaded at this moment, the flow is not expected.\n for ref, urls in refs.items():\n is_excluded = all(any(url.startswith(excluded_url)\n for excluded_url in excluded_urls)\n for url in urls)\n if not is_excluded and ref in all_refs:\n files_to_upload.append(f)\n files_to_upload.append(f[:-5])\n break\n return files_to_upload\n\n @staticmethod\n def update_backup_sources_json(cached_path, conanfile, urls):\n \"\"\" create or update the sha256.json file with the references and new urls used\n \"\"\"\n summary_path = cached_path + \".json\"\n if os.path.exists(summary_path):\n summary = json.loads(load(summary_path))\n else:\n summary = {\"references\": {}, \"timestamp\": timestamp_now()}\n\n try:\n summary_key = str(conanfile.ref)\n except AttributeError:\n # The recipe path would be different between machines\n # So best we can do is to set this as unknown\n summary_key = \"unknown\"\n\n if not isinstance(urls, (list, tuple)):\n urls = [urls]\n existing_urls = summary[\"references\"].setdefault(summary_key, [])\n existing_urls.extend(url for url in urls if url not in existing_urls)\n conanfile.output.verbose(f\"Updating ${summary_path} summary file\")\n summary_dump = json.dumps(summary)\n conanfile.output.debug(f\"New summary: ${summary_dump}\")\n save(summary_path, json.dumps(summary))\n", "path": "conans/client/downloaders/download_cache.py"}]} | 1,470 | 205 |
gh_patches_debug_8952 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1413 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
utcnow is deprecated in python 3.12
#### Environment details
- OS: Linux
- Python version: 3.12.0
- pip version: 23.2.1
- `google-auth` version: 2.9.1
#### Issue
Here is the related code
https://github.com/googleapis/google-auth-library-python/blob/d2ab3afdb567850121fec7de1d86fb5fb0fa80ed/google/auth/_helpers.py#L89-L95
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/auth/_helpers.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helper functions for commonly used utilities."""
16
17 import base64
18 import calendar
19 import datetime
20 from email.message import Message
21 import sys
22 import urllib
23
24 from google.auth import exceptions
25
26 # Token server doesn't provide a new a token when doing refresh unless the
27 # token is expiring within 30 seconds, so refresh threshold should not be
28 # more than 30 seconds. Otherwise auth lib will send tons of refresh requests
29 # until 30 seconds before the expiration, and cause a spike of CPU usage.
30 REFRESH_THRESHOLD = datetime.timedelta(seconds=20)
31
32
33 def copy_docstring(source_class):
34 """Decorator that copies a method's docstring from another class.
35
36 Args:
37 source_class (type): The class that has the documented method.
38
39 Returns:
40 Callable: A decorator that will copy the docstring of the same
41 named method in the source class to the decorated method.
42 """
43
44 def decorator(method):
45 """Decorator implementation.
46
47 Args:
48 method (Callable): The method to copy the docstring to.
49
50 Returns:
51 Callable: the same method passed in with an updated docstring.
52
53 Raises:
54 google.auth.exceptions.InvalidOperation: if the method already has a docstring.
55 """
56 if method.__doc__:
57 raise exceptions.InvalidOperation("Method already has a docstring.")
58
59 source_method = getattr(source_class, method.__name__)
60 method.__doc__ = source_method.__doc__
61
62 return method
63
64 return decorator
65
66
67 def parse_content_type(header_value):
68 """Parse a 'content-type' header value to get just the plain media-type (without parameters).
69
70 This is done using the class Message from email.message as suggested in PEP 594
71 (because the cgi is now deprecated and will be removed in python 3.13,
72 see https://peps.python.org/pep-0594/#cgi).
73
74 Args:
75 header_value (str): The value of a 'content-type' header as a string.
76
77 Returns:
78 str: A string with just the lowercase media-type from the parsed 'content-type' header.
79 If the provided content-type is not parsable, returns 'text/plain',
80 the default value for textual files.
81 """
82 m = Message()
83 m["content-type"] = header_value
84 return (
85 m.get_content_type()
86 ) # Despite the name, actually returns just the media-type
87
88
89 def utcnow():
90 """Returns the current UTC datetime.
91
92 Returns:
93 datetime: The current time in UTC.
94 """
95 return datetime.datetime.utcnow()
96
97
98 def datetime_to_secs(value):
99 """Convert a datetime object to the number of seconds since the UNIX epoch.
100
101 Args:
102 value (datetime): The datetime to convert.
103
104 Returns:
105 int: The number of seconds since the UNIX epoch.
106 """
107 return calendar.timegm(value.utctimetuple())
108
109
110 def to_bytes(value, encoding="utf-8"):
111 """Converts a string value to bytes, if necessary.
112
113 Args:
114 value (Union[str, bytes]): The value to be converted.
115 encoding (str): The encoding to use to convert unicode to bytes.
116 Defaults to "utf-8".
117
118 Returns:
119 bytes: The original value converted to bytes (if unicode) or as
120 passed in if it started out as bytes.
121
122 Raises:
123 google.auth.exceptions.InvalidValue: If the value could not be converted to bytes.
124 """
125 result = value.encode(encoding) if isinstance(value, str) else value
126 if isinstance(result, bytes):
127 return result
128 else:
129 raise exceptions.InvalidValue(
130 "{0!r} could not be converted to bytes".format(value)
131 )
132
133
134 def from_bytes(value):
135 """Converts bytes to a string value, if necessary.
136
137 Args:
138 value (Union[str, bytes]): The value to be converted.
139
140 Returns:
141 str: The original value converted to unicode (if bytes) or as passed in
142 if it started out as unicode.
143
144 Raises:
145 google.auth.exceptions.InvalidValue: If the value could not be converted to unicode.
146 """
147 result = value.decode("utf-8") if isinstance(value, bytes) else value
148 if isinstance(result, str):
149 return result
150 else:
151 raise exceptions.InvalidValue(
152 "{0!r} could not be converted to unicode".format(value)
153 )
154
155
156 def update_query(url, params, remove=None):
157 """Updates a URL's query parameters.
158
159 Replaces any current values if they are already present in the URL.
160
161 Args:
162 url (str): The URL to update.
163 params (Mapping[str, str]): A mapping of query parameter
164 keys to values.
165 remove (Sequence[str]): Parameters to remove from the query string.
166
167 Returns:
168 str: The URL with updated query parameters.
169
170 Examples:
171
172 >>> url = 'http://example.com?a=1'
173 >>> update_query(url, {'a': '2'})
174 http://example.com?a=2
175 >>> update_query(url, {'b': '3'})
176 http://example.com?a=1&b=3
177 >> update_query(url, {'b': '3'}, remove=['a'])
178 http://example.com?b=3
179
180 """
181 if remove is None:
182 remove = []
183
184 # Split the URL into parts.
185 parts = urllib.parse.urlparse(url)
186 # Parse the query string.
187 query_params = urllib.parse.parse_qs(parts.query)
188 # Update the query parameters with the new parameters.
189 query_params.update(params)
190 # Remove any values specified in remove.
191 query_params = {
192 key: value for key, value in query_params.items() if key not in remove
193 }
194 # Re-encoded the query string.
195 new_query = urllib.parse.urlencode(query_params, doseq=True)
196 # Unsplit the url.
197 new_parts = parts._replace(query=new_query)
198 return urllib.parse.urlunparse(new_parts)
199
200
201 def scopes_to_string(scopes):
202 """Converts scope value to a string suitable for sending to OAuth 2.0
203 authorization servers.
204
205 Args:
206 scopes (Sequence[str]): The sequence of scopes to convert.
207
208 Returns:
209 str: The scopes formatted as a single string.
210 """
211 return " ".join(scopes)
212
213
214 def string_to_scopes(scopes):
215 """Converts stringifed scopes value to a list.
216
217 Args:
218 scopes (Union[Sequence, str]): The string of space-separated scopes
219 to convert.
220 Returns:
221 Sequence(str): The separated scopes.
222 """
223 if not scopes:
224 return []
225
226 return scopes.split(" ")
227
228
229 def padded_urlsafe_b64decode(value):
230 """Decodes base64 strings lacking padding characters.
231
232 Google infrastructure tends to omit the base64 padding characters.
233
234 Args:
235 value (Union[str, bytes]): The encoded value.
236
237 Returns:
238 bytes: The decoded value
239 """
240 b64string = to_bytes(value)
241 padded = b64string + b"=" * (-len(b64string) % 4)
242 return base64.urlsafe_b64decode(padded)
243
244
245 def unpadded_urlsafe_b64encode(value):
246 """Encodes base64 strings removing any padding characters.
247
248 `rfc 7515`_ defines Base64url to NOT include any padding
249 characters, but the stdlib doesn't do that by default.
250
251 _rfc7515: https://tools.ietf.org/html/rfc7515#page-6
252
253 Args:
254 value (Union[str|bytes]): The bytes-like value to encode
255
256 Returns:
257 Union[str|bytes]: The encoded value
258 """
259 return base64.urlsafe_b64encode(value).rstrip(b"=")
260
261
262 def is_python_3():
263 """Check if the Python interpreter is Python 2 or 3.
264
265 Returns:
266 bool: True if the Python interpreter is Python 3 and False otherwise.
267 """
268 return sys.version_info > (3, 0)
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/auth/_helpers.py b/google/auth/_helpers.py
--- a/google/auth/_helpers.py
+++ b/google/auth/_helpers.py
@@ -92,7 +92,14 @@
Returns:
datetime: The current time in UTC.
"""
- return datetime.datetime.utcnow()
+ # We used datetime.utcnow() before, since it's deprecated from python 3.12,
+ # we are using datetime.now(timezone.utc) now. "utcnow()" is offset-native
+ # (no timezone info), but "now()" is offset-aware (with timezone info).
+ # This will cause datetime comparison problem. For backward compatibility,
+ # we need to remove the timezone info.
+ now = datetime.datetime.now(datetime.timezone.utc)
+ now = now.replace(tzinfo=None)
+ return now
def datetime_to_secs(value):
| {"golden_diff": "diff --git a/google/auth/_helpers.py b/google/auth/_helpers.py\n--- a/google/auth/_helpers.py\n+++ b/google/auth/_helpers.py\n@@ -92,7 +92,14 @@\n Returns:\n datetime: The current time in UTC.\n \"\"\"\n- return datetime.datetime.utcnow()\n+ # We used datetime.utcnow() before, since it's deprecated from python 3.12,\n+ # we are using datetime.now(timezone.utc) now. \"utcnow()\" is offset-native\n+ # (no timezone info), but \"now()\" is offset-aware (with timezone info).\n+ # This will cause datetime comparison problem. For backward compatibility,\n+ # we need to remove the timezone info.\n+ now = datetime.datetime.now(datetime.timezone.utc)\n+ now = now.replace(tzinfo=None)\n+ return now\n \n \n def datetime_to_secs(value):\n", "issue": "utcnow is deprecated in python 3.12\n\r\n#### Environment details\r\n\r\n - OS: Linux\r\n - Python version: 3.12.0\r\n - pip version: 23.2.1\r\n - `google-auth` version: 2.9.1\r\n\r\n#### Issue\r\nHere is the related code\r\n\r\nhttps://github.com/googleapis/google-auth-library-python/blob/d2ab3afdb567850121fec7de1d86fb5fb0fa80ed/google/auth/_helpers.py#L89-L95\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions for commonly used utilities.\"\"\"\n\nimport base64\nimport calendar\nimport datetime\nfrom email.message import Message\nimport sys\nimport urllib\n\nfrom google.auth import exceptions\n\n# Token server doesn't provide a new a token when doing refresh unless the\n# token is expiring within 30 seconds, so refresh threshold should not be\n# more than 30 seconds. 
Otherwise auth lib will send tons of refresh requests\n# until 30 seconds before the expiration, and cause a spike of CPU usage.\nREFRESH_THRESHOLD = datetime.timedelta(seconds=20)\n\n\ndef copy_docstring(source_class):\n \"\"\"Decorator that copies a method's docstring from another class.\n\n Args:\n source_class (type): The class that has the documented method.\n\n Returns:\n Callable: A decorator that will copy the docstring of the same\n named method in the source class to the decorated method.\n \"\"\"\n\n def decorator(method):\n \"\"\"Decorator implementation.\n\n Args:\n method (Callable): The method to copy the docstring to.\n\n Returns:\n Callable: the same method passed in with an updated docstring.\n\n Raises:\n google.auth.exceptions.InvalidOperation: if the method already has a docstring.\n \"\"\"\n if method.__doc__:\n raise exceptions.InvalidOperation(\"Method already has a docstring.\")\n\n source_method = getattr(source_class, method.__name__)\n method.__doc__ = source_method.__doc__\n\n return method\n\n return decorator\n\n\ndef parse_content_type(header_value):\n \"\"\"Parse a 'content-type' header value to get just the plain media-type (without parameters).\n\n This is done using the class Message from email.message as suggested in PEP 594\n (because the cgi is now deprecated and will be removed in python 3.13,\n see https://peps.python.org/pep-0594/#cgi).\n\n Args:\n header_value (str): The value of a 'content-type' header as a string.\n\n Returns:\n str: A string with just the lowercase media-type from the parsed 'content-type' header.\n If the provided content-type is not parsable, returns 'text/plain',\n the default value for textual files.\n \"\"\"\n m = Message()\n m[\"content-type\"] = header_value\n return (\n m.get_content_type()\n ) # Despite the name, actually returns just the media-type\n\n\ndef utcnow():\n \"\"\"Returns the current UTC datetime.\n\n Returns:\n datetime: The current time in UTC.\n \"\"\"\n return datetime.datetime.utcnow()\n\n\ndef datetime_to_secs(value):\n \"\"\"Convert a datetime object to the number of seconds since the UNIX epoch.\n\n Args:\n value (datetime): The datetime to convert.\n\n Returns:\n int: The number of seconds since the UNIX epoch.\n \"\"\"\n return calendar.timegm(value.utctimetuple())\n\n\ndef to_bytes(value, encoding=\"utf-8\"):\n \"\"\"Converts a string value to bytes, if necessary.\n\n Args:\n value (Union[str, bytes]): The value to be converted.\n encoding (str): The encoding to use to convert unicode to bytes.\n Defaults to \"utf-8\".\n\n Returns:\n bytes: The original value converted to bytes (if unicode) or as\n passed in if it started out as bytes.\n\n Raises:\n google.auth.exceptions.InvalidValue: If the value could not be converted to bytes.\n \"\"\"\n result = value.encode(encoding) if isinstance(value, str) else value\n if isinstance(result, bytes):\n return result\n else:\n raise exceptions.InvalidValue(\n \"{0!r} could not be converted to bytes\".format(value)\n )\n\n\ndef from_bytes(value):\n \"\"\"Converts bytes to a string value, if necessary.\n\n Args:\n value (Union[str, bytes]): The value to be converted.\n\n Returns:\n str: The original value converted to unicode (if bytes) or as passed in\n if it started out as unicode.\n\n Raises:\n google.auth.exceptions.InvalidValue: If the value could not be converted to unicode.\n \"\"\"\n result = value.decode(\"utf-8\") if isinstance(value, bytes) else value\n if isinstance(result, str):\n return result\n else:\n raise exceptions.InvalidValue(\n \"{0!r} 
could not be converted to unicode\".format(value)\n )\n\n\ndef update_query(url, params, remove=None):\n \"\"\"Updates a URL's query parameters.\n\n Replaces any current values if they are already present in the URL.\n\n Args:\n url (str): The URL to update.\n params (Mapping[str, str]): A mapping of query parameter\n keys to values.\n remove (Sequence[str]): Parameters to remove from the query string.\n\n Returns:\n str: The URL with updated query parameters.\n\n Examples:\n\n >>> url = 'http://example.com?a=1'\n >>> update_query(url, {'a': '2'})\n http://example.com?a=2\n >>> update_query(url, {'b': '3'})\n http://example.com?a=1&b=3\n >> update_query(url, {'b': '3'}, remove=['a'])\n http://example.com?b=3\n\n \"\"\"\n if remove is None:\n remove = []\n\n # Split the URL into parts.\n parts = urllib.parse.urlparse(url)\n # Parse the query string.\n query_params = urllib.parse.parse_qs(parts.query)\n # Update the query parameters with the new parameters.\n query_params.update(params)\n # Remove any values specified in remove.\n query_params = {\n key: value for key, value in query_params.items() if key not in remove\n }\n # Re-encoded the query string.\n new_query = urllib.parse.urlencode(query_params, doseq=True)\n # Unsplit the url.\n new_parts = parts._replace(query=new_query)\n return urllib.parse.urlunparse(new_parts)\n\n\ndef scopes_to_string(scopes):\n \"\"\"Converts scope value to a string suitable for sending to OAuth 2.0\n authorization servers.\n\n Args:\n scopes (Sequence[str]): The sequence of scopes to convert.\n\n Returns:\n str: The scopes formatted as a single string.\n \"\"\"\n return \" \".join(scopes)\n\n\ndef string_to_scopes(scopes):\n \"\"\"Converts stringifed scopes value to a list.\n\n Args:\n scopes (Union[Sequence, str]): The string of space-separated scopes\n to convert.\n Returns:\n Sequence(str): The separated scopes.\n \"\"\"\n if not scopes:\n return []\n\n return scopes.split(\" \")\n\n\ndef padded_urlsafe_b64decode(value):\n \"\"\"Decodes base64 strings lacking padding characters.\n\n Google infrastructure tends to omit the base64 padding characters.\n\n Args:\n value (Union[str, bytes]): The encoded value.\n\n Returns:\n bytes: The decoded value\n \"\"\"\n b64string = to_bytes(value)\n padded = b64string + b\"=\" * (-len(b64string) % 4)\n return base64.urlsafe_b64decode(padded)\n\n\ndef unpadded_urlsafe_b64encode(value):\n \"\"\"Encodes base64 strings removing any padding characters.\n\n `rfc 7515`_ defines Base64url to NOT include any padding\n characters, but the stdlib doesn't do that by default.\n\n _rfc7515: https://tools.ietf.org/html/rfc7515#page-6\n\n Args:\n value (Union[str|bytes]): The bytes-like value to encode\n\n Returns:\n Union[str|bytes]: The encoded value\n \"\"\"\n return base64.urlsafe_b64encode(value).rstrip(b\"=\")\n\n\ndef is_python_3():\n \"\"\"Check if the Python interpreter is Python 2 or 3.\n\n Returns:\n bool: True if the Python interpreter is Python 3 and False otherwise.\n \"\"\"\n return sys.version_info > (3, 0)\n", "path": "google/auth/_helpers.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions for commonly used utilities.\"\"\"\n\nimport base64\nimport calendar\nimport datetime\nfrom email.message import Message\nimport sys\nimport urllib\n\nfrom google.auth import exceptions\n\n# Token server doesn't provide a new a token when doing refresh unless the\n# token is expiring within 30 seconds, so refresh threshold should not be\n# more than 30 seconds. Otherwise auth lib will send tons of refresh requests\n# until 30 seconds before the expiration, and cause a spike of CPU usage.\nREFRESH_THRESHOLD = datetime.timedelta(seconds=20)\n\n\ndef copy_docstring(source_class):\n \"\"\"Decorator that copies a method's docstring from another class.\n\n Args:\n source_class (type): The class that has the documented method.\n\n Returns:\n Callable: A decorator that will copy the docstring of the same\n named method in the source class to the decorated method.\n \"\"\"\n\n def decorator(method):\n \"\"\"Decorator implementation.\n\n Args:\n method (Callable): The method to copy the docstring to.\n\n Returns:\n Callable: the same method passed in with an updated docstring.\n\n Raises:\n google.auth.exceptions.InvalidOperation: if the method already has a docstring.\n \"\"\"\n if method.__doc__:\n raise exceptions.InvalidOperation(\"Method already has a docstring.\")\n\n source_method = getattr(source_class, method.__name__)\n method.__doc__ = source_method.__doc__\n\n return method\n\n return decorator\n\n\ndef parse_content_type(header_value):\n \"\"\"Parse a 'content-type' header value to get just the plain media-type (without parameters).\n\n This is done using the class Message from email.message as suggested in PEP 594\n (because the cgi is now deprecated and will be removed in python 3.13,\n see https://peps.python.org/pep-0594/#cgi).\n\n Args:\n header_value (str): The value of a 'content-type' header as a string.\n\n Returns:\n str: A string with just the lowercase media-type from the parsed 'content-type' header.\n If the provided content-type is not parsable, returns 'text/plain',\n the default value for textual files.\n \"\"\"\n m = Message()\n m[\"content-type\"] = header_value\n return (\n m.get_content_type()\n ) # Despite the name, actually returns just the media-type\n\n\ndef utcnow():\n \"\"\"Returns the current UTC datetime.\n\n Returns:\n datetime: The current time in UTC.\n \"\"\"\n # We used datetime.utcnow() before, since it's deprecated from python 3.12,\n # we are using datetime.now(timezone.utc) now. \"utcnow()\" is offset-native\n # (no timezone info), but \"now()\" is offset-aware (with timezone info).\n # This will cause datetime comparison problem. 
For backward compatibility,\n # we need to remove the timezone info.\n now = datetime.datetime.now(datetime.timezone.utc)\n now = now.replace(tzinfo=None)\n return now\n\n\ndef datetime_to_secs(value):\n \"\"\"Convert a datetime object to the number of seconds since the UNIX epoch.\n\n Args:\n value (datetime): The datetime to convert.\n\n Returns:\n int: The number of seconds since the UNIX epoch.\n \"\"\"\n return calendar.timegm(value.utctimetuple())\n\n\ndef to_bytes(value, encoding=\"utf-8\"):\n \"\"\"Converts a string value to bytes, if necessary.\n\n Args:\n value (Union[str, bytes]): The value to be converted.\n encoding (str): The encoding to use to convert unicode to bytes.\n Defaults to \"utf-8\".\n\n Returns:\n bytes: The original value converted to bytes (if unicode) or as\n passed in if it started out as bytes.\n\n Raises:\n google.auth.exceptions.InvalidValue: If the value could not be converted to bytes.\n \"\"\"\n result = value.encode(encoding) if isinstance(value, str) else value\n if isinstance(result, bytes):\n return result\n else:\n raise exceptions.InvalidValue(\n \"{0!r} could not be converted to bytes\".format(value)\n )\n\n\ndef from_bytes(value):\n \"\"\"Converts bytes to a string value, if necessary.\n\n Args:\n value (Union[str, bytes]): The value to be converted.\n\n Returns:\n str: The original value converted to unicode (if bytes) or as passed in\n if it started out as unicode.\n\n Raises:\n google.auth.exceptions.InvalidValue: If the value could not be converted to unicode.\n \"\"\"\n result = value.decode(\"utf-8\") if isinstance(value, bytes) else value\n if isinstance(result, str):\n return result\n else:\n raise exceptions.InvalidValue(\n \"{0!r} could not be converted to unicode\".format(value)\n )\n\n\ndef update_query(url, params, remove=None):\n \"\"\"Updates a URL's query parameters.\n\n Replaces any current values if they are already present in the URL.\n\n Args:\n url (str): The URL to update.\n params (Mapping[str, str]): A mapping of query parameter\n keys to values.\n remove (Sequence[str]): Parameters to remove from the query string.\n\n Returns:\n str: The URL with updated query parameters.\n\n Examples:\n\n >>> url = 'http://example.com?a=1'\n >>> update_query(url, {'a': '2'})\n http://example.com?a=2\n >>> update_query(url, {'b': '3'})\n http://example.com?a=1&b=3\n >> update_query(url, {'b': '3'}, remove=['a'])\n http://example.com?b=3\n\n \"\"\"\n if remove is None:\n remove = []\n\n # Split the URL into parts.\n parts = urllib.parse.urlparse(url)\n # Parse the query string.\n query_params = urllib.parse.parse_qs(parts.query)\n # Update the query parameters with the new parameters.\n query_params.update(params)\n # Remove any values specified in remove.\n query_params = {\n key: value for key, value in query_params.items() if key not in remove\n }\n # Re-encoded the query string.\n new_query = urllib.parse.urlencode(query_params, doseq=True)\n # Unsplit the url.\n new_parts = parts._replace(query=new_query)\n return urllib.parse.urlunparse(new_parts)\n\n\ndef scopes_to_string(scopes):\n \"\"\"Converts scope value to a string suitable for sending to OAuth 2.0\n authorization servers.\n\n Args:\n scopes (Sequence[str]): The sequence of scopes to convert.\n\n Returns:\n str: The scopes formatted as a single string.\n \"\"\"\n return \" \".join(scopes)\n\n\ndef string_to_scopes(scopes):\n \"\"\"Converts stringifed scopes value to a list.\n\n Args:\n scopes (Union[Sequence, str]): The string of space-separated scopes\n to convert.\n 
Returns:\n Sequence(str): The separated scopes.\n \"\"\"\n if not scopes:\n return []\n\n return scopes.split(\" \")\n\n\ndef padded_urlsafe_b64decode(value):\n \"\"\"Decodes base64 strings lacking padding characters.\n\n Google infrastructure tends to omit the base64 padding characters.\n\n Args:\n value (Union[str, bytes]): The encoded value.\n\n Returns:\n bytes: The decoded value\n \"\"\"\n b64string = to_bytes(value)\n padded = b64string + b\"=\" * (-len(b64string) % 4)\n return base64.urlsafe_b64decode(padded)\n\n\ndef unpadded_urlsafe_b64encode(value):\n \"\"\"Encodes base64 strings removing any padding characters.\n\n `rfc 7515`_ defines Base64url to NOT include any padding\n characters, but the stdlib doesn't do that by default.\n\n _rfc7515: https://tools.ietf.org/html/rfc7515#page-6\n\n Args:\n value (Union[str|bytes]): The bytes-like value to encode\n\n Returns:\n Union[str|bytes]: The encoded value\n \"\"\"\n return base64.urlsafe_b64encode(value).rstrip(b\"=\")\n\n\ndef is_python_3():\n \"\"\"Check if the Python interpreter is Python 2 or 3.\n\n Returns:\n bool: True if the Python interpreter is Python 3 and False otherwise.\n \"\"\"\n return sys.version_info > (3, 0)\n", "path": "google/auth/_helpers.py"}]} | 2,966 | 189 |
gh_patches_debug_15574 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-872 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Verbose output not shown unless -s is specified
I am running a test suite with Hypothesis under py.test. When setting the HYPOTHESIS_VERBOSITY_LEVEL=verbose environment variable I expected to see the intermediate results. However, I need to specify -s when invoking py.test, otherwise the intermediate results are suppressed.
Python 3.6.0a1
py.test 2.9.2
hypothesis 3.4.2
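
A minimal reproduction sketch (file and test names are illustrative; `@settings(verbosity=Verbosity.verbose)` has the same effect as the environment variable):

```python
# test_repro.py -- illustrative only
from hypothesis import given, settings, Verbosity, strategies as st

@settings(verbosity=Verbosity.verbose)
@given(st.integers())
def test_shows_intermediate_examples(x):
    assert isinstance(x, int)
```

Running `py.test test_repro.py` hides the per-example output, while `py.test -s test_repro.py` shows it.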
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2017 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 # -*- coding: utf-8 -*-
19
20 from __future__ import division, print_function, absolute_import
21
22 # on_rtd is whether we are on readthedocs.org
23 import os
24 import sys
25 import datetime
26
27 from hypothesis import __version__
28
29 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
30
31 sys.path.append(
32 os.path.join(os.path.dirname(__file__), '..', 'src')
33 )
34
35
36 autodoc_member_order = 'bysource'
37
38 extensions = [
39 'sphinx.ext.autodoc',
40 'sphinx.ext.doctest',
41 'sphinx.ext.extlinks',
42 'sphinx.ext.viewcode',
43 'sphinx.ext.intersphinx',
44 ]
45
46 templates_path = ['_templates']
47
48 source_suffix = '.rst'
49
50 # The master toctree document.
51 master_doc = 'index'
52
53 # General information about the project.
54 project = u'Hypothesis'
55 copyright = u'2013-%s, David R. MacIver' % datetime.datetime.utcnow().year
56 author = u'David R. MacIver'
57
58 version = __version__
59 release = __version__
60
61 language = None
62
63 exclude_patterns = ['_build']
64
65 pygments_style = 'sphinx'
66
67 todo_include_todos = False
68
69 intersphinx_mapping = {
70 'python': ('https://docs.python.org/3/', None),
71 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
72 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None)
73 }
74
75 autodoc_mock_imports = ['numpy', 'pandas']
76
77 doctest_global_setup = '''
78 # Some standard imports
79 from hypothesis import *
80 from hypothesis.strategies import *
81 # Ensure that output (including from strategies) is deterministic
82 import random
83 random.seed(0)
84 # don't save examples
85 settings.register_profile('doctests', settings(database=None))
86 settings.load_profile('doctests')
87 import warnings
88 warnings.filterwarnings('error', category=HypothesisDeprecationWarning)
89 '''
90
91 # This config value must be a dictionary of external sites, mapping unique
92 # short alias names to a base URL and a prefix.
93 # See http://sphinx-doc.org/ext/extlinks.html
94 extlinks = {
95 'commit': ('https://github.com/HypothesisWorks/hypothesis-python/commit/%s', 'commit '),
96 'gh-file': ('https://github.com/HypothesisWorks/hypothesis-python/blob/master/%s', ''),
97 'gh-link': ('https://github.com/HypothesisWorks/hypothesis-python/%s', ''),
98 'issue': ('https://github.com/HypothesisWorks/hypothesis-python/issues/%s', 'issue #'),
99 'pull': ('https://github.com/HypothesisWorks/hypothesis-python/pulls/%s', 'pull request #'),
100 }
101
102 # -- Options for HTML output ----------------------------------------------
103
104 if not on_rtd: # only import and set the theme if we're building docs locally
105 import sphinx_rtd_theme
106 html_theme = 'sphinx_rtd_theme'
107 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
108
109 html_static_path = ['_static']
110
111 htmlhelp_basename = 'Hypothesisdoc'
112
113 # -- Options for LaTeX output ---------------------------------------------
114
115 latex_elements = {
116 }
117
118 latex_documents = [
119 (master_doc, 'Hypothesis.tex', u'Hypothesis Documentation',
120 u'David R. MacIver', 'manual'),
121 ]
122
123 man_pages = [
124 (master_doc, 'hypothesis', u'Hypothesis Documentation',
125 [author], 1)
126 ]
127
128 texinfo_documents = [
129 (master_doc, 'Hypothesis', u'Hypothesis Documentation',
130 author, 'Hypothesis', 'One line description of project.',
131 'Miscellaneous'),
132 ]
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -69,7 +69,8 @@
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
- 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None)
+ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
+ 'pytest': ('https://docs.pytest.org/en/stable/', None),
}
autodoc_mock_imports = ['numpy', 'pandas']
@@ -127,6 +128,6 @@
texinfo_documents = [
(master_doc, 'Hypothesis', u'Hypothesis Documentation',
- author, 'Hypothesis', 'One line description of project.',
+ author, 'Hypothesis', 'Advanced property-based testing for Python.',
'Miscellaneous'),
]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -69,7 +69,8 @@\n intersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n- 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None)\n+ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),\n+ 'pytest': ('https://docs.pytest.org/en/stable/', None),\n }\n \n autodoc_mock_imports = ['numpy', 'pandas']\n@@ -127,6 +128,6 @@\n \n texinfo_documents = [\n (master_doc, 'Hypothesis', u'Hypothesis Documentation',\n- author, 'Hypothesis', 'One line description of project.',\n+ author, 'Hypothesis', 'Advanced property-based testing for Python.',\n 'Miscellaneous'),\n ]\n", "issue": "Verbose output not shown unless -s is specified\nI am running a test suite with hypothesis using py.test, when setting HYPOTHESIS_VERBOSITY_LEVEL=verbose environment variable I expected to see the intermediate results. However I need to specify -s when invokin py.test otherwise the intermediate results are suppressed.\n\nPython 3.6.0a1\npy.test 2.9.2\nhypothesis 3.4.2\n\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\n# on_rtd is whether we are on readthedocs.org\nimport os\nimport sys\nimport datetime\n\nfrom hypothesis import __version__\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nsys.path.append(\n os.path.join(os.path.dirname(__file__), '..', 'src')\n)\n\n\nautodoc_member_order = 'bysource'\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\ntemplates_path = ['_templates']\n\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Hypothesis'\ncopyright = u'2013-%s, David R. MacIver' % datetime.datetime.utcnow().year\nauthor = u'David R. 
MacIver'\n\nversion = __version__\nrelease = __version__\n\nlanguage = None\n\nexclude_patterns = ['_build']\n\npygments_style = 'sphinx'\n\ntodo_include_todos = False\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None)\n}\n\nautodoc_mock_imports = ['numpy', 'pandas']\n\ndoctest_global_setup = '''\n# Some standard imports\nfrom hypothesis import *\nfrom hypothesis.strategies import *\n# Ensure that output (including from strategies) is deterministic\nimport random\nrandom.seed(0)\n# don't save examples\nsettings.register_profile('doctests', settings(database=None))\nsettings.load_profile('doctests')\nimport warnings\nwarnings.filterwarnings('error', category=HypothesisDeprecationWarning)\n'''\n\n# This config value must be a dictionary of external sites, mapping unique\n# short alias names to a base URL and a prefix.\n# See http://sphinx-doc.org/ext/extlinks.html\nextlinks = {\n 'commit': ('https://github.com/HypothesisWorks/hypothesis-python/commit/%s', 'commit '),\n 'gh-file': ('https://github.com/HypothesisWorks/hypothesis-python/blob/master/%s', ''),\n 'gh-link': ('https://github.com/HypothesisWorks/hypothesis-python/%s', ''),\n 'issue': ('https://github.com/HypothesisWorks/hypothesis-python/issues/%s', 'issue #'),\n 'pull': ('https://github.com/HypothesisWorks/hypothesis-python/pulls/%s', 'pull request #'),\n}\n\n# -- Options for HTML output ----------------------------------------------\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'Hypothesisdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n}\n\nlatex_documents = [\n (master_doc, 'Hypothesis.tex', u'Hypothesis Documentation',\n u'David R. MacIver', 'manual'),\n]\n\nman_pages = [\n (master_doc, 'hypothesis', u'Hypothesis Documentation',\n [author], 1)\n]\n\ntexinfo_documents = [\n (master_doc, 'Hypothesis', u'Hypothesis Documentation',\n author, 'Hypothesis', 'One line description of project.',\n 'Miscellaneous'),\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2017 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\n# on_rtd is whether we are on readthedocs.org\nimport os\nimport sys\nimport datetime\n\nfrom hypothesis import __version__\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nsys.path.append(\n os.path.join(os.path.dirname(__file__), '..', 'src')\n)\n\n\nautodoc_member_order = 'bysource'\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx',\n]\n\ntemplates_path = ['_templates']\n\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Hypothesis'\ncopyright = u'2013-%s, David R. MacIver' % datetime.datetime.utcnow().year\nauthor = u'David R. MacIver'\n\nversion = __version__\nrelease = __version__\n\nlanguage = None\n\nexclude_patterns = ['_build']\n\npygments_style = 'sphinx'\n\ntodo_include_todos = False\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),\n 'pytest': ('https://docs.pytest.org/en/stable/', None),\n}\n\nautodoc_mock_imports = ['numpy', 'pandas']\n\ndoctest_global_setup = '''\n# Some standard imports\nfrom hypothesis import *\nfrom hypothesis.strategies import *\n# Ensure that output (including from strategies) is deterministic\nimport random\nrandom.seed(0)\n# don't save examples\nsettings.register_profile('doctests', settings(database=None))\nsettings.load_profile('doctests')\nimport warnings\nwarnings.filterwarnings('error', category=HypothesisDeprecationWarning)\n'''\n\n# This config value must be a dictionary of external sites, mapping unique\n# short alias names to a base URL and a prefix.\n# See http://sphinx-doc.org/ext/extlinks.html\nextlinks = {\n 'commit': ('https://github.com/HypothesisWorks/hypothesis-python/commit/%s', 'commit '),\n 'gh-file': ('https://github.com/HypothesisWorks/hypothesis-python/blob/master/%s', ''),\n 'gh-link': ('https://github.com/HypothesisWorks/hypothesis-python/%s', ''),\n 'issue': ('https://github.com/HypothesisWorks/hypothesis-python/issues/%s', 'issue #'),\n 'pull': ('https://github.com/HypothesisWorks/hypothesis-python/pulls/%s', 'pull request #'),\n}\n\n# -- Options for HTML output ----------------------------------------------\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'Hypothesisdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n}\n\nlatex_documents = [\n (master_doc, 'Hypothesis.tex', u'Hypothesis Documentation',\n u'David R. MacIver', 'manual'),\n]\n\nman_pages = [\n (master_doc, 'hypothesis', u'Hypothesis Documentation',\n [author], 1)\n]\n\ntexinfo_documents = [\n (master_doc, 'Hypothesis', u'Hypothesis Documentation',\n author, 'Hypothesis', 'Advanced property-based testing for Python.',\n 'Miscellaneous'),\n]\n", "path": "docs/conf.py"}]} | 1,641 | 228 |
gh_patches_debug_11949 | rasdani/github-patches | git_diff | cupy__cupy-2923 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows: module 'numpy' has no attribute 'complex256'
In file: [cupy/cupyx/scipy/ndimage/filters.py](https://github.com/cupy/cupy/blob/master/cupyx/scipy/ndimage/filters.py)
Line 83: ` if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):`
There is a check for numpy.complex256. On Windows there is no numpy.complex256, so this line leads to the following error:
module 'numpy' has no attribute 'complex256'
A simple solution could be to use the function [numpy.iscomplexobj](https://docs.scipy.org/doc/numpy/reference/generated/numpy.iscomplexobj.html#numpy.iscomplexobj), or (maybe faster) to stringify the dtype of the array and check for the substring "complex".
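
A minimal sketch of such a platform-independent check (the helper name is illustrative; either variant avoids referencing `numpy.complex256`):

```python
import numpy as np

def has_complex_dtype(arr):
    """Platform-independent complex check (no numpy.complex256 needed)."""
    # Option 1: let NumPy decide from the object's dtype.
    if np.iscomplexobj(arr):
        return True
    # Option 2 (equivalent): inspect the dtype kind code directly.
    return arr.dtype.kind == 'c'

# Works on Windows, where numpy.complex256 does not exist.
print(has_complex_dtype(np.ones(3, dtype=np.complex64)))  # True
print(has_complex_dtype(np.ones(3, dtype=np.float32)))    # False
```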
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupyx/scipy/ndimage/filters.py`
Content:
```
1 import numpy
2
3 import cupy
4 from cupy import util
5
6
7 def correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0):
8 """Multi-dimensional correlate.
9
10 The array is correlated with the given kernel.
11
12 Args:
13 input (cupy.ndarray): The input array.
14 weights (cupy.ndarray): Array of weights, same number of dimensions as
15 input
16 output (cupy.ndarray, dtype or None): The array in which to place the
17 output.
18 mode (str): The array borders are handled according to the given mode
19 (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
20 ``'wrap'``). Default is ``'reflect'``.
21 cval (scalar): Value to fill past edges of input if mode is
22 ``constant``. Default is ``0.0``.
23 origin (scalar or tuple of scalar): The origin parameter controls the
24 placement of the filter, relative to the center of the current
25 element of the input. Default of 0 is equivalent to
26 ``(0,)*input.ndim``.
27
28 Returns:
29 cupy.ndarray: The result of correlate.
30
31 .. seealso:: :func:`scipy.ndimage.correlate`
32 """
33 return _correlate_or_convolve(input, weights, output, mode, cval, origin,
34 False)
35
36
37 def convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0):
38 """Multi-dimensional convolution.
39
40 The array is convolved with the given kernel.
41
42 Args:
43 input (cupy.ndarray): The input array.
44 weights (cupy.ndarray): Array of weights, same number of dimensions as
45 input
46 output (cupy.ndarray, dtype or None): The array in which to place the
47 output.
48 mode (str): The array borders are handled according to the given mode
49 (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
50 ``'wrap'``). Default is ``'reflect'``.
51 cval (scalar): Value to fill past edges of input if mode is
52 ``constant``. Default is ``0.0``.
53 origin (scalar or tuple of scalar): The origin parameter controls the
54 placement of the filter, relative to the center of the current
55 element of the input. Default of 0 is equivalent to
56 ``(0,)*input.ndim``.
57
58 Returns:
59 cupy.ndarray: The result of convolution.
60
61 .. seealso:: :func:`scipy.ndimage.convolve`
62 """
63 return _correlate_or_convolve(input, weights, output, mode, cval, origin,
64 True)
65
66
67 def _get_output(output, input, shape=None):
68 if shape is None:
69 shape = input.shape
70 if isinstance(output, cupy.ndarray):
71 if output.shape != tuple(shape):
72 raise ValueError('output shape is not correct')
73 else:
74 dtype = output
75 if dtype is None:
76 dtype = input.dtype
77 output = cupy.zeros(shape, dtype)
78 return output
79
80
81 def _correlate_or_convolve(input, weights, output, mode, cval, origin,
82 convolution):
83 if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):
84 raise TypeError('Complex type not supported.')
85 if not hasattr(origin, '__getitem__'):
86 origin = [origin, ] * input.ndim
87 else:
88 origin = list(origin)
89 wshape = [ii for ii in weights.shape if ii > 0]
90 if len(wshape) != input.ndim:
91 raise RuntimeError('filter weights array has incorrect shape.')
92 if convolution:
93 weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
94 for ii in range(len(origin)):
95 origin[ii] = -origin[ii]
96 if weights.shape[ii] % 2 == 0:
97 origin[ii] -= 1
98 for _origin, lenw in zip(origin, wshape):
99 if (lenw // 2 + _origin < 0) or (lenw // 2 + _origin >= lenw):
100 raise ValueError('invalid origin')
101 if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap'):
102 msg = 'boundary mode not supported (actual: {}).'.format(mode)
103 raise RuntimeError(msg)
104
105 output = _get_output(output, input)
106 if weights.size == 0:
107 return output
108 input = cupy.ascontiguousarray(input)
109 weights = cupy.ascontiguousarray(weights, cupy.float64)
110 return _get_correlete_kernel(
111 input.ndim, mode, cval, input.shape, tuple(wshape), tuple(origin))(
112 input, weights, output)
113
114
115 def _generate_boundary_condition_ops(mode, ix, xsize):
116 if mode == 'reflect':
117 ops = '''
118 if ({ix} < 0) {{
119 {ix} = - 1 - {ix};
120 }}
121 {ix} %= {xsize} * 2;
122 {ix} = min({ix}, 2 * {xsize} - 1 - {ix});'''.format(ix=ix, xsize=xsize)
123 elif mode == 'mirror':
124 ops = '''
125 if ({ix} < 0) {{
126 {ix} = - {ix};
127 }}
128 if ({xsize} == 1) {{
129 {ix} = 0;
130 }} else {{
131 {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);
132 {ix} = min({ix}, 2 * {xsize} - 2 - {ix});
133 }}'''.format(ix=ix, xsize=xsize)
134 elif mode == 'nearest':
135 ops = '''
136 {ix} = min(max({ix}, 0), {xsize} - 1);'''.format(ix=ix, xsize=xsize)
137 elif mode == 'wrap':
138 ops = '''
139 if ({ix} < 0) {{
140 {ix} += (1 - ({ix} / {xsize})) * {xsize};
141 }}
142 {ix} %= {xsize};'''.format(ix=ix, xsize=xsize)
143 elif mode == 'constant':
144 ops = '''
145 if ({ix} >= {xsize}) {{
146 {ix} = -1;
147 }}'''.format(ix=ix, xsize=xsize)
148 return ops
149
150
151 def _generate_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):
152 in_params = 'raw X x, raw W w'
153 out_params = 'Y y'
154
155 ops = []
156 ops.append('const int sx_{} = 1;'.format(ndim-1))
157 for j in range(ndim-1, 0, -1):
158 ops.append('int sx_{jm} = sx_{j} * {xsize_j};'.
159 format(jm=j-1, j=j, xsize_j=xshape[j]))
160 ops.append('int _i = i;')
161 for j in range(ndim-1, -1, -1):
162 ops.append('int cx_{j} = _i % {xsize} - ({wsize} / 2) - ({origin});'
163 .format(j=j, xsize=xshape[j], wsize=wshape[j],
164 origin=origin[j]))
165 if (j > 0):
166 ops.append('_i /= {xsize};'.format(xsize=xshape[j]))
167 ops.append('W sum = (W)0;')
168 ops.append('int iw = 0;')
169
170 for j in range(ndim):
171 ops.append('''
172 for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)
173 {{
174 int ix_{j} = cx_{j} + iw_{j};'''.format(j=j, wsize=wshape[j]))
175 ixvar = 'ix_{}'.format(j)
176 ops.append(_generate_boundary_condition_ops(mode, ixvar, xshape[j]))
177 ops.append(' ix_{j} *= sx_{j};'.format(j=j))
178
179 _cond = ' || '.join(['(ix_{0} < 0)'.format(j) for j in range(ndim)])
180 _expr = ' + '.join(['ix_{0}'.format(j) for j in range(ndim)])
181 ops.append('''
182 if ({cond}) {{
183 sum += (W){cval} * w[iw];
184 }} else {{
185 int ix = {expr};
186 sum += (W)x[ix] * w[iw];
187 }}
188 iw += 1;'''.format(cond=_cond, expr=_expr, cval=cval))
189
190 ops.append('} ' * ndim)
191 ops.append('y = (Y)sum;')
192 operation = '\n'.join(ops)
193
194 name = 'cupy_ndimage_correlate_{}d_{}_x{}_w{}'.format(
195 ndim, mode, '_'.join(['{}'.format(j) for j in xshape]),
196 '_'.join(['{}'.format(j) for j in wshape]))
197 return in_params, out_params, operation, name
198
199
200 @util.memoize()
201 def _get_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):
202 # weights is always casted to float64 in order to get an output compatible
203 # with SciPy, thought float32 might be sufficient when input dtype is low
204 # precision.
205 in_params, out_params, operation, name = _generate_correlete_kernel(
206 ndim, mode, cval, xshape, wshape, origin)
207 return cupy.ElementwiseKernel(in_params, out_params, operation, name)
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupyx/scipy/ndimage/filters.py b/cupyx/scipy/ndimage/filters.py
--- a/cupyx/scipy/ndimage/filters.py
+++ b/cupyx/scipy/ndimage/filters.py
@@ -1,5 +1,3 @@
-import numpy
-
import cupy
from cupy import util
@@ -80,7 +78,7 @@
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
- if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):
+ if input.dtype.kind == 'c':
raise TypeError('Complex type not supported.')
if not hasattr(origin, '__getitem__'):
origin = [origin, ] * input.ndim
| {"golden_diff": "diff --git a/cupyx/scipy/ndimage/filters.py b/cupyx/scipy/ndimage/filters.py\n--- a/cupyx/scipy/ndimage/filters.py\n+++ b/cupyx/scipy/ndimage/filters.py\n@@ -1,5 +1,3 @@\n-import numpy\n-\n import cupy\n from cupy import util\n \n@@ -80,7 +78,7 @@\n \n def _correlate_or_convolve(input, weights, output, mode, cval, origin,\n convolution):\n- if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):\n+ if input.dtype.kind == 'c':\n raise TypeError('Complex type not supported.')\n if not hasattr(origin, '__getitem__'):\n origin = [origin, ] * input.ndim\n", "issue": "Windows: module 'numpy' has no attribute 'complex256'\nIn file: [cupy/cupyx/scipy/ndimage/filters.py] (https://github.com/cupy/cupy/blob/master/cupyx/scipy/ndimage/filters.py)\r\n\r\nLine 83: ` if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):`\r\n\r\nThere is a check for numpy.complex256. On Windows there is no numpy.complex256, so this line leads to the following error:\r\n\r\nmodule 'numpy' has no attribute 'complex256'\r\n\r\na simple solution could be to use the function [numpy.iscomplexobj\r\n](https://docs.scipy.org/doc/numpy/reference/generated/numpy.iscomplexobj.html#numpy.iscomplexobj) or (maybe faster) to stringify the dtype of the array and check for substring \"complex\"\n", "before_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import util\n\n\ndef correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0):\n \"\"\"Multi-dimensional correlate.\n\n The array is correlated with the given kernel.\n\n Args:\n input (cupy.ndarray): The input array.\n weights (cupy.ndarray): Array of weights, same number of dimensions as\n input\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n mode (str): The array borders are handled according to the given mode\n (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,\n ``'wrap'``). Default is ``'reflect'``.\n cval (scalar): Value to fill past edges of input if mode is\n ``constant``. Default is ``0.0``.\n origin (scalar or tuple of scalar): The origin parameter controls the\n placement of the filter, relative to the center of the current\n element of the input. Default of 0 is equivalent to\n ``(0,)*input.ndim``.\n\n Returns:\n cupy.ndarray: The result of correlate.\n\n .. seealso:: :func:`scipy.ndimage.correlate`\n \"\"\"\n return _correlate_or_convolve(input, weights, output, mode, cval, origin,\n False)\n\n\ndef convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0):\n \"\"\"Multi-dimensional convolution.\n\n The array is convolved with the given kernel.\n\n Args:\n input (cupy.ndarray): The input array.\n weights (cupy.ndarray): Array of weights, same number of dimensions as\n input\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n mode (str): The array borders are handled according to the given mode\n (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,\n ``'wrap'``). Default is ``'reflect'``.\n cval (scalar): Value to fill past edges of input if mode is\n ``constant``. Default is ``0.0``.\n origin (scalar or tuple of scalar): The origin parameter controls the\n placement of the filter, relative to the center of the current\n element of the input. Default of 0 is equivalent to\n ``(0,)*input.ndim``.\n\n Returns:\n cupy.ndarray: The result of convolution.\n\n .. 
seealso:: :func:`scipy.ndimage.convolve`\n \"\"\"\n return _correlate_or_convolve(input, weights, output, mode, cval, origin,\n True)\n\n\ndef _get_output(output, input, shape=None):\n if shape is None:\n shape = input.shape\n if isinstance(output, cupy.ndarray):\n if output.shape != tuple(shape):\n raise ValueError('output shape is not correct')\n else:\n dtype = output\n if dtype is None:\n dtype = input.dtype\n output = cupy.zeros(shape, dtype)\n return output\n\n\ndef _correlate_or_convolve(input, weights, output, mode, cval, origin,\n convolution):\n if input.dtype in (numpy.complex64, numpy.complex128, numpy.complex256):\n raise TypeError('Complex type not supported.')\n if not hasattr(origin, '__getitem__'):\n origin = [origin, ] * input.ndim\n else:\n origin = list(origin)\n wshape = [ii for ii in weights.shape if ii > 0]\n if len(wshape) != input.ndim:\n raise RuntimeError('filter weights array has incorrect shape.')\n if convolution:\n weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]\n for ii in range(len(origin)):\n origin[ii] = -origin[ii]\n if weights.shape[ii] % 2 == 0:\n origin[ii] -= 1\n for _origin, lenw in zip(origin, wshape):\n if (lenw // 2 + _origin < 0) or (lenw // 2 + _origin >= lenw):\n raise ValueError('invalid origin')\n if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap'):\n msg = 'boundary mode not supported (actual: {}).'.format(mode)\n raise RuntimeError(msg)\n\n output = _get_output(output, input)\n if weights.size == 0:\n return output\n input = cupy.ascontiguousarray(input)\n weights = cupy.ascontiguousarray(weights, cupy.float64)\n return _get_correlete_kernel(\n input.ndim, mode, cval, input.shape, tuple(wshape), tuple(origin))(\n input, weights, output)\n\n\ndef _generate_boundary_condition_ops(mode, ix, xsize):\n if mode == 'reflect':\n ops = '''\n if ({ix} < 0) {{\n {ix} = - 1 - {ix};\n }}\n {ix} %= {xsize} * 2;\n {ix} = min({ix}, 2 * {xsize} - 1 - {ix});'''.format(ix=ix, xsize=xsize)\n elif mode == 'mirror':\n ops = '''\n if ({ix} < 0) {{\n {ix} = - {ix};\n }}\n if ({xsize} == 1) {{\n {ix} = 0;\n }} else {{\n {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);\n {ix} = min({ix}, 2 * {xsize} - 2 - {ix});\n }}'''.format(ix=ix, xsize=xsize)\n elif mode == 'nearest':\n ops = '''\n {ix} = min(max({ix}, 0), {xsize} - 1);'''.format(ix=ix, xsize=xsize)\n elif mode == 'wrap':\n ops = '''\n if ({ix} < 0) {{\n {ix} += (1 - ({ix} / {xsize})) * {xsize};\n }}\n {ix} %= {xsize};'''.format(ix=ix, xsize=xsize)\n elif mode == 'constant':\n ops = '''\n if ({ix} >= {xsize}) {{\n {ix} = -1;\n }}'''.format(ix=ix, xsize=xsize)\n return ops\n\n\ndef _generate_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):\n in_params = 'raw X x, raw W w'\n out_params = 'Y y'\n\n ops = []\n ops.append('const int sx_{} = 1;'.format(ndim-1))\n for j in range(ndim-1, 0, -1):\n ops.append('int sx_{jm} = sx_{j} * {xsize_j};'.\n format(jm=j-1, j=j, xsize_j=xshape[j]))\n ops.append('int _i = i;')\n for j in range(ndim-1, -1, -1):\n ops.append('int cx_{j} = _i % {xsize} - ({wsize} / 2) - ({origin});'\n .format(j=j, xsize=xshape[j], wsize=wshape[j],\n origin=origin[j]))\n if (j > 0):\n ops.append('_i /= {xsize};'.format(xsize=xshape[j]))\n ops.append('W sum = (W)0;')\n ops.append('int iw = 0;')\n\n for j in range(ndim):\n ops.append('''\n for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)\n {{\n int ix_{j} = cx_{j} + iw_{j};'''.format(j=j, wsize=wshape[j]))\n ixvar = 'ix_{}'.format(j)\n ops.append(_generate_boundary_condition_ops(mode, ixvar, xshape[j]))\n 
ops.append(' ix_{j} *= sx_{j};'.format(j=j))\n\n _cond = ' || '.join(['(ix_{0} < 0)'.format(j) for j in range(ndim)])\n _expr = ' + '.join(['ix_{0}'.format(j) for j in range(ndim)])\n ops.append('''\n if ({cond}) {{\n sum += (W){cval} * w[iw];\n }} else {{\n int ix = {expr};\n sum += (W)x[ix] * w[iw];\n }}\n iw += 1;'''.format(cond=_cond, expr=_expr, cval=cval))\n\n ops.append('} ' * ndim)\n ops.append('y = (Y)sum;')\n operation = '\\n'.join(ops)\n\n name = 'cupy_ndimage_correlate_{}d_{}_x{}_w{}'.format(\n ndim, mode, '_'.join(['{}'.format(j) for j in xshape]),\n '_'.join(['{}'.format(j) for j in wshape]))\n return in_params, out_params, operation, name\n\n\[email protected]()\ndef _get_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):\n # weights is always casted to float64 in order to get an output compatible\n # with SciPy, thought float32 might be sufficient when input dtype is low\n # precision.\n in_params, out_params, operation, name = _generate_correlete_kernel(\n ndim, mode, cval, xshape, wshape, origin)\n return cupy.ElementwiseKernel(in_params, out_params, operation, name)\n", "path": "cupyx/scipy/ndimage/filters.py"}], "after_files": [{"content": "import cupy\nfrom cupy import util\n\n\ndef correlate(input, weights, output=None, mode='reflect', cval=0.0, origin=0):\n \"\"\"Multi-dimensional correlate.\n\n The array is correlated with the given kernel.\n\n Args:\n input (cupy.ndarray): The input array.\n weights (cupy.ndarray): Array of weights, same number of dimensions as\n input\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n mode (str): The array borders are handled according to the given mode\n (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,\n ``'wrap'``). Default is ``'reflect'``.\n cval (scalar): Value to fill past edges of input if mode is\n ``constant``. Default is ``0.0``.\n origin (scalar or tuple of scalar): The origin parameter controls the\n placement of the filter, relative to the center of the current\n element of the input. Default of 0 is equivalent to\n ``(0,)*input.ndim``.\n\n Returns:\n cupy.ndarray: The result of correlate.\n\n .. seealso:: :func:`scipy.ndimage.correlate`\n \"\"\"\n return _correlate_or_convolve(input, weights, output, mode, cval, origin,\n False)\n\n\ndef convolve(input, weights, output=None, mode='reflect', cval=0.0, origin=0):\n \"\"\"Multi-dimensional convolution.\n\n The array is convolved with the given kernel.\n\n Args:\n input (cupy.ndarray): The input array.\n weights (cupy.ndarray): Array of weights, same number of dimensions as\n input\n output (cupy.ndarray, dtype or None): The array in which to place the\n output.\n mode (str): The array borders are handled according to the given mode\n (``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,\n ``'wrap'``). Default is ``'reflect'``.\n cval (scalar): Value to fill past edges of input if mode is\n ``constant``. Default is ``0.0``.\n origin (scalar or tuple of scalar): The origin parameter controls the\n placement of the filter, relative to the center of the current\n element of the input. Default of 0 is equivalent to\n ``(0,)*input.ndim``.\n\n Returns:\n cupy.ndarray: The result of convolution.\n\n .. 
seealso:: :func:`scipy.ndimage.convolve`\n \"\"\"\n return _correlate_or_convolve(input, weights, output, mode, cval, origin,\n True)\n\n\ndef _get_output(output, input, shape=None):\n if shape is None:\n shape = input.shape\n if isinstance(output, cupy.ndarray):\n if output.shape != tuple(shape):\n raise ValueError('output shape is not correct')\n else:\n dtype = output\n if dtype is None:\n dtype = input.dtype\n output = cupy.zeros(shape, dtype)\n return output\n\n\ndef _correlate_or_convolve(input, weights, output, mode, cval, origin,\n convolution):\n if input.dtype.kind == 'c':\n raise TypeError('Complex type not supported.')\n if not hasattr(origin, '__getitem__'):\n origin = [origin, ] * input.ndim\n else:\n origin = list(origin)\n wshape = [ii for ii in weights.shape if ii > 0]\n if len(wshape) != input.ndim:\n raise RuntimeError('filter weights array has incorrect shape.')\n if convolution:\n weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]\n for ii in range(len(origin)):\n origin[ii] = -origin[ii]\n if weights.shape[ii] % 2 == 0:\n origin[ii] -= 1\n for _origin, lenw in zip(origin, wshape):\n if (lenw // 2 + _origin < 0) or (lenw // 2 + _origin >= lenw):\n raise ValueError('invalid origin')\n if mode not in ('reflect', 'constant', 'nearest', 'mirror', 'wrap'):\n msg = 'boundary mode not supported (actual: {}).'.format(mode)\n raise RuntimeError(msg)\n\n output = _get_output(output, input)\n if weights.size == 0:\n return output\n input = cupy.ascontiguousarray(input)\n weights = cupy.ascontiguousarray(weights, cupy.float64)\n return _get_correlete_kernel(\n input.ndim, mode, cval, input.shape, tuple(wshape), tuple(origin))(\n input, weights, output)\n\n\ndef _generate_boundary_condition_ops(mode, ix, xsize):\n if mode == 'reflect':\n ops = '''\n if ({ix} < 0) {{\n {ix} = - 1 - {ix};\n }}\n {ix} %= {xsize} * 2;\n {ix} = min({ix}, 2 * {xsize} - 1 - {ix});'''.format(ix=ix, xsize=xsize)\n elif mode == 'mirror':\n ops = '''\n if ({ix} < 0) {{\n {ix} = - {ix};\n }}\n if ({xsize} == 1) {{\n {ix} = 0;\n }} else {{\n {ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);\n {ix} = min({ix}, 2 * {xsize} - 2 - {ix});\n }}'''.format(ix=ix, xsize=xsize)\n elif mode == 'nearest':\n ops = '''\n {ix} = min(max({ix}, 0), {xsize} - 1);'''.format(ix=ix, xsize=xsize)\n elif mode == 'wrap':\n ops = '''\n if ({ix} < 0) {{\n {ix} += (1 - ({ix} / {xsize})) * {xsize};\n }}\n {ix} %= {xsize};'''.format(ix=ix, xsize=xsize)\n elif mode == 'constant':\n ops = '''\n if ({ix} >= {xsize}) {{\n {ix} = -1;\n }}'''.format(ix=ix, xsize=xsize)\n return ops\n\n\ndef _generate_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):\n in_params = 'raw X x, raw W w'\n out_params = 'Y y'\n\n ops = []\n ops.append('const int sx_{} = 1;'.format(ndim-1))\n for j in range(ndim-1, 0, -1):\n ops.append('int sx_{jm} = sx_{j} * {xsize_j};'.\n format(jm=j-1, j=j, xsize_j=xshape[j]))\n ops.append('int _i = i;')\n for j in range(ndim-1, -1, -1):\n ops.append('int cx_{j} = _i % {xsize} - ({wsize} / 2) - ({origin});'\n .format(j=j, xsize=xshape[j], wsize=wshape[j],\n origin=origin[j]))\n if (j > 0):\n ops.append('_i /= {xsize};'.format(xsize=xshape[j]))\n ops.append('W sum = (W)0;')\n ops.append('int iw = 0;')\n\n for j in range(ndim):\n ops.append('''\n for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)\n {{\n int ix_{j} = cx_{j} + iw_{j};'''.format(j=j, wsize=wshape[j]))\n ixvar = 'ix_{}'.format(j)\n ops.append(_generate_boundary_condition_ops(mode, ixvar, xshape[j]))\n ops.append(' ix_{j} *= sx_{j};'.format(j=j))\n\n _cond 
= ' || '.join(['(ix_{0} < 0)'.format(j) for j in range(ndim)])\n _expr = ' + '.join(['ix_{0}'.format(j) for j in range(ndim)])\n ops.append('''\n if ({cond}) {{\n sum += (W){cval} * w[iw];\n }} else {{\n int ix = {expr};\n sum += (W)x[ix] * w[iw];\n }}\n iw += 1;'''.format(cond=_cond, expr=_expr, cval=cval))\n\n ops.append('} ' * ndim)\n ops.append('y = (Y)sum;')\n operation = '\\n'.join(ops)\n\n name = 'cupy_ndimage_correlate_{}d_{}_x{}_w{}'.format(\n ndim, mode, '_'.join(['{}'.format(j) for j in xshape]),\n '_'.join(['{}'.format(j) for j in wshape]))\n return in_params, out_params, operation, name\n\n\[email protected]()\ndef _get_correlete_kernel(ndim, mode, cval, xshape, wshape, origin):\n # weights is always casted to float64 in order to get an output compatible\n # with SciPy, thought float32 might be sufficient when input dtype is low\n # precision.\n in_params, out_params, operation, name = _generate_correlete_kernel(\n ndim, mode, cval, xshape, wshape, origin)\n return cupy.ElementwiseKernel(in_params, out_params, operation, name)\n", "path": "cupyx/scipy/ndimage/filters.py"}]} | 3,135 | 184 |
gh_patches_debug_9373 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1788 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kappa coefficient greater than 1
Version: Paddle 2.2, PaddleSeg version: 2.3.0. When evaluating a model, I found that the **Kappa coefficient is greater than 1**. What could be causing this?
As shown in the figure below:

The configuration file is as follows:
```yaml
batch_size: 2
iters: 80000
model:
type: DeepLabV3P
backbone:
type: ResNet101_vd
output_stride: 8
multi_grid: [1, 2, 4]
pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
num_classes: 2
backbone_indices: [0, 3]
aspp_ratios: [1, 12, 24, 36]
aspp_out_channels: 256
align_corners: False
pretrained: null
train_dataset:
type: Dataset
dataset_root: data/seg_data
train_path: data/seg_data/train.txt
num_classes: 2
transforms:
- type: ResizeStepScaling
min_scale_factor: 0.5
max_scale_factor: 2.0
scale_step_size: 0.25
- type: RandomPaddingCrop
crop_size: [512, 512]
- type: RandomHorizontalFlip
- type: RandomDistort
brightness_range: 0.4
contrast_range: 0.4
saturation_range: 0.4
- type: Normalize
mode: train
val_dataset:
type: Dataset
dataset_root: data/seg_data
val_path: data/seg_data/val.txt
num_classes: 2
transforms:
- type: Resize
target_size: [512, 512]
- type: Normalize
mode: val
optimizer:
type: sgd
momentum: 0.9
weight_decay: 4.0e-5
lr_scheduler:
type: PolynomialDecay
learning_rate: 0.01
end_lr: 0
power: 0.9
loss:
types:
- type: CrossEntropyLoss
coef: [1]
```
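
One plausible cause could be integer overflow: the per-class areas in `paddleseg/utils/metrics.py` are int32 arrays, so `pred_area * label_area` inside `kappa` can wrap around once the accumulated pixel counts get large, corrupting `pe` and pushing kappa above 1. A tiny illustration with made-up numbers:

```python
import numpy as np

pred_area = np.array([4_000_000, 3_000_000], dtype=np.int32)
label_area = np.array([4_100_000, 2_900_000], dtype=np.int32)

print(pred_area * label_area)                     # int32 product wraps around
print(pred_area.astype(np.float64) * label_area)  # casting first gives the true values
```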
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `paddleseg/utils/metrics.py`
Content:
```
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import numpy as np
16 import paddle
17 import paddle.nn.functional as F
18 import sklearn.metrics as skmetrics
19
20
21 def calculate_area(pred, label, num_classes, ignore_index=255):
22 """
23 Calculate intersect, prediction and label area
24
25 Args:
26 pred (Tensor): The prediction by model.
27 label (Tensor): The ground truth of image.
28 num_classes (int): The unique number of target classes.
29 ignore_index (int): Specifies a target value that is ignored. Default: 255.
30
31 Returns:
32 Tensor: The intersection area of prediction and the ground on all class.
33 Tensor: The prediction area on all class.
34 Tensor: The ground truth area on all class
35 """
36 if len(pred.shape) == 4:
37 pred = paddle.squeeze(pred, axis=1)
38 if len(label.shape) == 4:
39 label = paddle.squeeze(label, axis=1)
40 if not pred.shape == label.shape:
41 raise ValueError('Shape of `pred` and `label should be equal, '
42 'but there are {} and {}.'.format(
43 pred.shape, label.shape))
44 pred_area = []
45 label_area = []
46 intersect_area = []
47 mask = label != ignore_index
48
49 for i in range(num_classes):
50 pred_i = paddle.logical_and(pred == i, mask)
51 label_i = label == i
52 intersect_i = paddle.logical_and(pred_i, label_i)
53 pred_area.append(paddle.sum(paddle.cast(pred_i, "int32")))
54 label_area.append(paddle.sum(paddle.cast(label_i, "int32")))
55 intersect_area.append(paddle.sum(paddle.cast(intersect_i, "int32")))
56
57 pred_area = paddle.concat(pred_area)
58 label_area = paddle.concat(label_area)
59 intersect_area = paddle.concat(intersect_area)
60
61 return intersect_area, pred_area, label_area
62
63
64 def auc_roc(logits, label, num_classes, ignore_index=None):
65 """
66 Calculate area under the roc curve
67
68 Args:
69 logits (Tensor): The prediction by model on testset, of shape (N,C,H,W) .
70 label (Tensor): The ground truth of image. (N,1,H,W)
71 num_classes (int): The unique number of target classes.
72 ignore_index (int): Specifies a target value that is ignored. Default: 255.
73
74 Returns:
75 auc_roc(float): The area under roc curve
76 """
77 if ignore_index or len(np.unique(label)) > num_classes:
78 raise RuntimeError('labels with ignore_index is not supported yet.')
79
80 if len(label.shape) != 4:
81 raise ValueError(
82 'The shape of label is not 4 dimension as (N, C, H, W), it is {}'.
83 format(label.shape))
84
85 if len(logits.shape) != 4:
86 raise ValueError(
87 'The shape of logits is not 4 dimension as (N, C, H, W), it is {}'.
88 format(logits.shape))
89
90 N, C, H, W = logits.shape
91 logits = np.transpose(logits, (1, 0, 2, 3))
92 logits = logits.reshape([C, N * H * W]).transpose([1, 0])
93
94 label = np.transpose(label, (1, 0, 2, 3))
95 label = label.reshape([1, N * H * W]).squeeze()
96
97 if not logits.shape[0] == label.shape[0]:
98 raise ValueError('length of `logit` and `label` should be equal, '
99 'but they are {} and {}.'.format(
100 logits.shape[0], label.shape[0]))
101
102 if num_classes == 2:
103 auc = skmetrics.roc_auc_score(label, logits[:, 1])
104 else:
105 auc = skmetrics.roc_auc_score(label, logits, multi_class='ovr')
106
107 return auc
108
109
110 def mean_iou(intersect_area, pred_area, label_area):
111 """
112 Calculate iou.
113
114 Args:
115 intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
116 pred_area (Tensor): The prediction area on all classes.
117 label_area (Tensor): The ground truth area on all classes.
118
119 Returns:
120 np.ndarray: iou on all classes.
121 float: mean iou of all classes.
122 """
123 intersect_area = intersect_area.numpy()
124 pred_area = pred_area.numpy()
125 label_area = label_area.numpy()
126 union = pred_area + label_area - intersect_area
127 class_iou = []
128 for i in range(len(intersect_area)):
129 if union[i] == 0:
130 iou = 0
131 else:
132 iou = intersect_area[i] / union[i]
133 class_iou.append(iou)
134 miou = np.mean(class_iou)
135 return np.array(class_iou), miou
136
137
138 def dice(intersect_area, pred_area, label_area):
139 """
140 Calculate DICE.
141
142 Args:
143 intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.
144 pred_area (Tensor): The prediction area on all classes.
145 label_area (Tensor): The ground truth area on all classes.
146
147 Returns:
148 np.ndarray: DICE on all classes.
149 float: mean DICE of all classes.
150 """
151 intersect_area = intersect_area.numpy()
152 pred_area = pred_area.numpy()
153 label_area = label_area.numpy()
154 union = pred_area + label_area
155 class_dice = []
156 for i in range(len(intersect_area)):
157 if union[i] == 0:
158 dice = 0
159 else:
160 dice = (2 * intersect_area[i]) / union[i]
161 class_dice.append(dice)
162 mdice = np.mean(class_dice)
163 return np.array(class_dice), mdice
164
165
166 def accuracy(intersect_area, pred_area):
167 """
168 Calculate accuracy
169
170 Args:
171 intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..
172 pred_area (Tensor): The prediction area on all classes.
173
174 Returns:
175 np.ndarray: accuracy on all classes.
176 float: mean accuracy.
177 """
178 intersect_area = intersect_area.numpy()
179 pred_area = pred_area.numpy()
180 class_acc = []
181 for i in range(len(intersect_area)):
182 if pred_area[i] == 0:
183 acc = 0
184 else:
185 acc = intersect_area[i] / pred_area[i]
186 class_acc.append(acc)
187 macc = np.sum(intersect_area) / np.sum(pred_area)
188 return np.array(class_acc), macc
189
190
191 def kappa(intersect_area, pred_area, label_area):
192 """
193 Calculate kappa coefficient
194
195 Args:
196 intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..
197 pred_area (Tensor): The prediction area on all classes.
198 label_area (Tensor): The ground truth area on all classes.
199
200 Returns:
201 float: kappa coefficient.
202 """
203 intersect_area = intersect_area.numpy()
204 pred_area = pred_area.numpy()
205 label_area = label_area.numpy()
206 total_area = np.sum(label_area)
207 po = np.sum(intersect_area) / total_area
208 pe = np.sum(pred_area * label_area) / (total_area * total_area)
209 kappa = (po - pe) / (1 - pe)
210 return kappa
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/paddleseg/utils/metrics.py b/paddleseg/utils/metrics.py
--- a/paddleseg/utils/metrics.py
+++ b/paddleseg/utils/metrics.py
@@ -200,9 +200,9 @@
Returns:
float: kappa coefficient.
"""
- intersect_area = intersect_area.numpy()
- pred_area = pred_area.numpy()
- label_area = label_area.numpy()
+ intersect_area = intersect_area.numpy().astype(np.float64)
+ pred_area = pred_area.numpy().astype(np.float64)
+ label_area = label_area.numpy().astype(np.float64)
total_area = np.sum(label_area)
po = np.sum(intersect_area) / total_area
pe = np.sum(pred_area * label_area) / (total_area * total_area)
| {"golden_diff": "diff --git a/paddleseg/utils/metrics.py b/paddleseg/utils/metrics.py\n--- a/paddleseg/utils/metrics.py\n+++ b/paddleseg/utils/metrics.py\n@@ -200,9 +200,9 @@\n Returns:\n float: kappa coefficient.\n \"\"\"\n- intersect_area = intersect_area.numpy()\n- pred_area = pred_area.numpy()\n- label_area = label_area.numpy()\n+ intersect_area = intersect_area.numpy().astype(np.float64)\n+ pred_area = pred_area.numpy().astype(np.float64)\n+ label_area = label_area.numpy().astype(np.float64)\n total_area = np.sum(label_area)\n po = np.sum(intersect_area) / total_area\n pe = np.sum(pred_area * label_area) / (total_area * total_area)\n", "issue": "Kappa\u7cfb\u6570\u5927\u4e8e1\n\u7248\u672c\uff1aPaddle2.2\uff0cPaddleSeg\u7248\u672c\uff1a2.3.0\u3002\u5728\u8bc4\u4f30\u6a21\u578b\u65f6\u53d1\u73b0**Kappa\u7cfb\u6570\u5927\u4e8e1**\u3002\u8bf7\u95ee\u8fd9\u662f\u4ec0\u4e48\u60c5\u51b5\uff1f\r\n\u5982\u4e0b\u56fe\u6240\u793a\uff1a\r\n\r\n\r\n\u914d\u7f6e\u6587\u4ef6\u5982\u4e0b\uff1a\r\n```batch_size: 2\r\niters: 80000\r\n\r\nmodel:\r\n type: DeepLabV3P\r\n backbone:\r\n type: ResNet101_vd\r\n output_stride: 8\r\n multi_grid: [1, 2, 4]\r\n pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz\r\n num_classes: 2\r\n backbone_indices: [0, 3]\r\n aspp_ratios: [1, 12, 24, 36]\r\n aspp_out_channels: 256\r\n align_corners: False\r\n pretrained: null\r\n\r\n\r\ntrain_dataset:\r\n type: Dataset\r\n dataset_root: data/seg_data\r\n train_path: data/seg_data/train.txt\r\n num_classes: 2\r\n transforms:\r\n - type: ResizeStepScaling\r\n min_scale_factor: 0.5\r\n max_scale_factor: 2.0\r\n scale_step_size: 0.25\r\n - type: RandomPaddingCrop\r\n crop_size: [512, 512]\r\n - type: RandomHorizontalFlip\r\n - type: RandomDistort\r\n brightness_range: 0.4\r\n contrast_range: 0.4\r\n saturation_range: 0.4\r\n - type: Normalize\r\n mode: train\r\n\r\nval_dataset:\r\n type: Dataset\r\n dataset_root: data/seg_data\r\n val_path: data/seg_data/val.txt\r\n num_classes: 2\r\n transforms:\r\n - type: Resize\r\n target_size: [512, 512]\r\n - type: Normalize\r\n mode: val\r\n\r\n\r\noptimizer:\r\n type: sgd\r\n momentum: 0.9\r\n weight_decay: 4.0e-5\r\n\r\nlr_scheduler:\r\n type: PolynomialDecay\r\n learning_rate: 0.01\r\n end_lr: 0\r\n power: 0.9\r\n\r\nloss:\r\n types:\r\n - type: CrossEntropyLoss\r\n coef: [1]\r\n```\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\nimport sklearn.metrics as skmetrics\n\n\ndef calculate_area(pred, label, num_classes, ignore_index=255):\n \"\"\"\n Calculate intersect, prediction and label area\n\n Args:\n pred (Tensor): The prediction by model.\n label (Tensor): The ground truth of image.\n num_classes (int): The unique number of target classes.\n ignore_index (int): Specifies a target value that is ignored. 
Default: 255.\n\n Returns:\n Tensor: The intersection area of prediction and the ground on all class.\n Tensor: The prediction area on all class.\n Tensor: The ground truth area on all class\n \"\"\"\n if len(pred.shape) == 4:\n pred = paddle.squeeze(pred, axis=1)\n if len(label.shape) == 4:\n label = paddle.squeeze(label, axis=1)\n if not pred.shape == label.shape:\n raise ValueError('Shape of `pred` and `label should be equal, '\n 'but there are {} and {}.'.format(\n pred.shape, label.shape))\n pred_area = []\n label_area = []\n intersect_area = []\n mask = label != ignore_index\n\n for i in range(num_classes):\n pred_i = paddle.logical_and(pred == i, mask)\n label_i = label == i\n intersect_i = paddle.logical_and(pred_i, label_i)\n pred_area.append(paddle.sum(paddle.cast(pred_i, \"int32\")))\n label_area.append(paddle.sum(paddle.cast(label_i, \"int32\")))\n intersect_area.append(paddle.sum(paddle.cast(intersect_i, \"int32\")))\n\n pred_area = paddle.concat(pred_area)\n label_area = paddle.concat(label_area)\n intersect_area = paddle.concat(intersect_area)\n\n return intersect_area, pred_area, label_area\n\n\ndef auc_roc(logits, label, num_classes, ignore_index=None):\n \"\"\"\n Calculate area under the roc curve\n\n Args:\n logits (Tensor): The prediction by model on testset, of shape (N,C,H,W) .\n label (Tensor): The ground truth of image. (N,1,H,W)\n num_classes (int): The unique number of target classes.\n ignore_index (int): Specifies a target value that is ignored. Default: 255.\n\n Returns:\n auc_roc(float): The area under roc curve\n \"\"\"\n if ignore_index or len(np.unique(label)) > num_classes:\n raise RuntimeError('labels with ignore_index is not supported yet.')\n\n if len(label.shape) != 4:\n raise ValueError(\n 'The shape of label is not 4 dimension as (N, C, H, W), it is {}'.\n format(label.shape))\n\n if len(logits.shape) != 4:\n raise ValueError(\n 'The shape of logits is not 4 dimension as (N, C, H, W), it is {}'.\n format(logits.shape))\n\n N, C, H, W = logits.shape\n logits = np.transpose(logits, (1, 0, 2, 3))\n logits = logits.reshape([C, N * H * W]).transpose([1, 0])\n\n label = np.transpose(label, (1, 0, 2, 3))\n label = label.reshape([1, N * H * W]).squeeze()\n\n if not logits.shape[0] == label.shape[0]:\n raise ValueError('length of `logit` and `label` should be equal, '\n 'but they are {} and {}.'.format(\n logits.shape[0], label.shape[0]))\n\n if num_classes == 2:\n auc = skmetrics.roc_auc_score(label, logits[:, 1])\n else:\n auc = skmetrics.roc_auc_score(label, logits, multi_class='ovr')\n\n return auc\n\n\ndef mean_iou(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate iou.\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.\n pred_area (Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n np.ndarray: iou on all classes.\n float: mean iou of all classes.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n label_area = label_area.numpy()\n union = pred_area + label_area - intersect_area\n class_iou = []\n for i in range(len(intersect_area)):\n if union[i] == 0:\n iou = 0\n else:\n iou = intersect_area[i] / union[i]\n class_iou.append(iou)\n miou = np.mean(class_iou)\n return np.array(class_iou), miou\n\n\ndef dice(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate DICE.\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.\n pred_area 
(Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n np.ndarray: DICE on all classes.\n float: mean DICE of all classes.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n label_area = label_area.numpy()\n union = pred_area + label_area\n class_dice = []\n for i in range(len(intersect_area)):\n if union[i] == 0:\n dice = 0\n else:\n dice = (2 * intersect_area[i]) / union[i]\n class_dice.append(dice)\n mdice = np.mean(class_dice)\n return np.array(class_dice), mdice\n\n\ndef accuracy(intersect_area, pred_area):\n \"\"\"\n Calculate accuracy\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..\n pred_area (Tensor): The prediction area on all classes.\n\n Returns:\n np.ndarray: accuracy on all classes.\n float: mean accuracy.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n class_acc = []\n for i in range(len(intersect_area)):\n if pred_area[i] == 0:\n acc = 0\n else:\n acc = intersect_area[i] / pred_area[i]\n class_acc.append(acc)\n macc = np.sum(intersect_area) / np.sum(pred_area)\n return np.array(class_acc), macc\n\n\ndef kappa(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate kappa coefficient\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..\n pred_area (Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n float: kappa coefficient.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n label_area = label_area.numpy()\n total_area = np.sum(label_area)\n po = np.sum(intersect_area) / total_area\n pe = np.sum(pred_area * label_area) / (total_area * total_area)\n kappa = (po - pe) / (1 - pe)\n return kappa\n", "path": "paddleseg/utils/metrics.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\nimport sklearn.metrics as skmetrics\n\n\ndef calculate_area(pred, label, num_classes, ignore_index=255):\n \"\"\"\n Calculate intersect, prediction and label area\n\n Args:\n pred (Tensor): The prediction by model.\n label (Tensor): The ground truth of image.\n num_classes (int): The unique number of target classes.\n ignore_index (int): Specifies a target value that is ignored. 
Default: 255.\n\n Returns:\n Tensor: The intersection area of prediction and the ground on all class.\n Tensor: The prediction area on all class.\n Tensor: The ground truth area on all class\n \"\"\"\n if len(pred.shape) == 4:\n pred = paddle.squeeze(pred, axis=1)\n if len(label.shape) == 4:\n label = paddle.squeeze(label, axis=1)\n if not pred.shape == label.shape:\n raise ValueError('Shape of `pred` and `label should be equal, '\n 'but there are {} and {}.'.format(\n pred.shape, label.shape))\n pred_area = []\n label_area = []\n intersect_area = []\n mask = label != ignore_index\n\n for i in range(num_classes):\n pred_i = paddle.logical_and(pred == i, mask)\n label_i = label == i\n intersect_i = paddle.logical_and(pred_i, label_i)\n pred_area.append(paddle.sum(paddle.cast(pred_i, \"int32\")))\n label_area.append(paddle.sum(paddle.cast(label_i, \"int32\")))\n intersect_area.append(paddle.sum(paddle.cast(intersect_i, \"int32\")))\n\n pred_area = paddle.concat(pred_area)\n label_area = paddle.concat(label_area)\n intersect_area = paddle.concat(intersect_area)\n\n return intersect_area, pred_area, label_area\n\n\ndef auc_roc(logits, label, num_classes, ignore_index=None):\n \"\"\"\n Calculate area under the roc curve\n\n Args:\n logits (Tensor): The prediction by model on testset, of shape (N,C,H,W) .\n label (Tensor): The ground truth of image. (N,1,H,W)\n num_classes (int): The unique number of target classes.\n ignore_index (int): Specifies a target value that is ignored. Default: 255.\n\n Returns:\n auc_roc(float): The area under roc curve\n \"\"\"\n if ignore_index or len(np.unique(label)) > num_classes:\n raise RuntimeError('labels with ignore_index is not supported yet.')\n\n if len(label.shape) != 4:\n raise ValueError(\n 'The shape of label is not 4 dimension as (N, C, H, W), it is {}'.\n format(label.shape))\n\n if len(logits.shape) != 4:\n raise ValueError(\n 'The shape of logits is not 4 dimension as (N, C, H, W), it is {}'.\n format(logits.shape))\n\n N, C, H, W = logits.shape\n logits = np.transpose(logits, (1, 0, 2, 3))\n logits = logits.reshape([C, N * H * W]).transpose([1, 0])\n\n label = np.transpose(label, (1, 0, 2, 3))\n label = label.reshape([1, N * H * W]).squeeze()\n\n if not logits.shape[0] == label.shape[0]:\n raise ValueError('length of `logit` and `label` should be equal, '\n 'but they are {} and {}.'.format(\n logits.shape[0], label.shape[0]))\n\n if num_classes == 2:\n auc = skmetrics.roc_auc_score(label, logits[:, 1])\n else:\n auc = skmetrics.roc_auc_score(label, logits, multi_class='ovr')\n\n return auc\n\n\ndef mean_iou(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate iou.\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.\n pred_area (Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n np.ndarray: iou on all classes.\n float: mean iou of all classes.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n label_area = label_area.numpy()\n union = pred_area + label_area - intersect_area\n class_iou = []\n for i in range(len(intersect_area)):\n if union[i] == 0:\n iou = 0\n else:\n iou = intersect_area[i] / union[i]\n class_iou.append(iou)\n miou = np.mean(class_iou)\n return np.array(class_iou), miou\n\n\ndef dice(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate DICE.\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.\n pred_area 
(Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n np.ndarray: DICE on all classes.\n float: mean DICE of all classes.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n label_area = label_area.numpy()\n union = pred_area + label_area\n class_dice = []\n for i in range(len(intersect_area)):\n if union[i] == 0:\n dice = 0\n else:\n dice = (2 * intersect_area[i]) / union[i]\n class_dice.append(dice)\n mdice = np.mean(class_dice)\n return np.array(class_dice), mdice\n\n\ndef accuracy(intersect_area, pred_area):\n \"\"\"\n Calculate accuracy\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..\n pred_area (Tensor): The prediction area on all classes.\n\n Returns:\n np.ndarray: accuracy on all classes.\n float: mean accuracy.\n \"\"\"\n intersect_area = intersect_area.numpy()\n pred_area = pred_area.numpy()\n class_acc = []\n for i in range(len(intersect_area)):\n if pred_area[i] == 0:\n acc = 0\n else:\n acc = intersect_area[i] / pred_area[i]\n class_acc.append(acc)\n macc = np.sum(intersect_area) / np.sum(pred_area)\n return np.array(class_acc), macc\n\n\ndef kappa(intersect_area, pred_area, label_area):\n \"\"\"\n Calculate kappa coefficient\n\n Args:\n intersect_area (Tensor): The intersection area of prediction and ground truth on all classes..\n pred_area (Tensor): The prediction area on all classes.\n label_area (Tensor): The ground truth area on all classes.\n\n Returns:\n float: kappa coefficient.\n \"\"\"\n intersect_area = intersect_area.numpy().astype(np.float64)\n pred_area = pred_area.numpy().astype(np.float64)\n label_area = label_area.numpy().astype(np.float64)\n total_area = np.sum(label_area)\n po = np.sum(intersect_area) / total_area\n pe = np.sum(pred_area * label_area) / (total_area * total_area)\n kappa = (po - pe) / (1 - pe)\n return kappa\n", "path": "paddleseg/utils/metrics.py"}]} | 3,087 | 178 |
gh_patches_debug_47466 | rasdani/github-patches | git_diff | bokeh__bokeh-8634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stocks Example is not working properly
https://github.com/bokeh/bokeh/tree/master/examples/app/stocks
The example is supposed to change the stats according to the selected points. For some reason, the following callback never fires:
def selection_change(attrname, old, new):
print('lol')
t1, t2 = ticker1.value, ticker2.value
data = get_data(t1, t2)
selected = source.selected.indices
if selected:
data = data.iloc[selected, :]
update_stats(data, t1, t2)
source.on_change('selected', selection_change)
The code never prints 'lol'.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/app/stocks/main.py`
Content:
```
1 ''' Create a simple stocks correlation dashboard.
2
3 Choose stocks to compare in the drop down widgets, and make selections
4 on the plots to update the summary and histograms accordingly.
5
6 .. note::
7 Running this example requires downloading sample data. See
8 the included `README`_ for more information.
9
10 Use the ``bokeh serve`` command to run the example by executing:
11
12 bokeh serve stocks
13
14 at your command prompt. Then navigate to the URL
15
16 http://localhost:5006/stocks
17
18 .. _README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md
19
20 '''
21 try:
22 from functools import lru_cache
23 except ImportError:
24 # Python 2 does stdlib does not have lru_cache so let's just
25 # create a dummy decorator to avoid crashing
26 print ("WARNING: Cache for this example is available on Python 3 only.")
27 def lru_cache():
28 def dec(f):
29 def _(*args, **kws):
30 return f(*args, **kws)
31 return _
32 return dec
33
34 from os.path import dirname, join
35
36 import pandas as pd
37
38 from bokeh.io import curdoc
39 from bokeh.layouts import row, column
40 from bokeh.models import ColumnDataSource
41 from bokeh.models.widgets import PreText, Select
42 from bokeh.plotting import figure
43
44 DATA_DIR = join(dirname(__file__), 'daily')
45
46 DEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
47
48 def nix(val, lst):
49 return [x for x in lst if x != val]
50
51 @lru_cache()
52 def load_ticker(ticker):
53 fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())
54 data = pd.read_csv(fname, header=None, parse_dates=['date'],
55 names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])
56 data = data.set_index('date')
57 return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})
58
59 @lru_cache()
60 def get_data(t1, t2):
61 df1 = load_ticker(t1)
62 df2 = load_ticker(t2)
63 data = pd.concat([df1, df2], axis=1)
64 data = data.dropna()
65 data['t1'] = data[t1]
66 data['t2'] = data[t2]
67 data['t1_returns'] = data[t1+'_returns']
68 data['t2_returns'] = data[t2+'_returns']
69 return data
70
71 # set up widgets
72
73 stats = PreText(text='', width=500)
74 ticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))
75 ticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))
76
77 # set up plots
78
79 source = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
80 source_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))
81 tools = 'pan,wheel_zoom,xbox_select,reset'
82
83 corr = figure(plot_width=350, plot_height=350,
84 tools='pan,wheel_zoom,box_select,reset')
85 corr.circle('t1_returns', 't2_returns', size=2, source=source,
86 selection_color="orange", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)
87
88 ts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
89 ts1.line('date', 't1', source=source_static)
90 ts1.circle('date', 't1', size=1, source=source, color=None, selection_color="orange")
91
92 ts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag="xbox_select")
93 ts2.x_range = ts1.x_range
94 ts2.line('date', 't2', source=source_static)
95 ts2.circle('date', 't2', size=1, source=source, color=None, selection_color="orange")
96
97 # set up callbacks
98
99 def ticker1_change(attrname, old, new):
100 ticker2.options = nix(new, DEFAULT_TICKERS)
101 update()
102
103 def ticker2_change(attrname, old, new):
104 ticker1.options = nix(new, DEFAULT_TICKERS)
105 update()
106
107 def update(selected=None):
108 t1, t2 = ticker1.value, ticker2.value
109
110 data = get_data(t1, t2)
111 source.data = source.from_df(data[['t1', 't2', 't1_returns', 't2_returns']])
112 source_static.data = source.data
113
114 update_stats(data, t1, t2)
115
116 corr.title.text = '%s returns vs. %s returns' % (t1, t2)
117 ts1.title.text, ts2.title.text = t1, t2
118
119 def update_stats(data, t1, t2):
120 stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())
121
122 ticker1.on_change('value', ticker1_change)
123 ticker2.on_change('value', ticker2_change)
124
125 def selection_change(attrname, old, new):
126 t1, t2 = ticker1.value, ticker2.value
127 data = get_data(t1, t2)
128 selected = source.selected.indices
129 if selected:
130 data = data.iloc[selected, :]
131 update_stats(data, t1, t2)
132
133 source.on_change('selected', selection_change)
134
135 # set up layout
136 widgets = column(ticker1, ticker2, stats)
137 main_row = row(corr, widgets)
138 series = column(ts1, ts2)
139 layout = column(main_row, series)
140
141 # initialize
142 update()
143
144 curdoc().add_root(layout)
145 curdoc().title = "Stocks"
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/app/stocks/main.py b/examples/app/stocks/main.py
--- a/examples/app/stocks/main.py
+++ b/examples/app/stocks/main.py
@@ -130,7 +130,7 @@
data = data.iloc[selected, :]
update_stats(data, t1, t2)
-source.on_change('selected', selection_change)
+source.selected.on_change('indices', selection_change)
# set up layout
widgets = column(ticker1, ticker2, stats)
| {"golden_diff": "diff --git a/examples/app/stocks/main.py b/examples/app/stocks/main.py\n--- a/examples/app/stocks/main.py\n+++ b/examples/app/stocks/main.py\n@@ -130,7 +130,7 @@\n data = data.iloc[selected, :]\n update_stats(data, t1, t2)\n \n-source.on_change('selected', selection_change)\n+source.selected.on_change('indices', selection_change)\n \n # set up layout\n widgets = column(ticker1, ticker2, stats)\n", "issue": "Stocks Example is not working properly \nhttps://github.com/bokeh/bokeh/tree/master/examples/app/stocks\r\n\r\nThe example suppose to change the stats according to the selected points. For some reason \r\n\r\n def selection_change(attrname, old, new):\r\n print('lol')\r\n t1, t2 = ticker1.value, ticker2.value\r\n data = get_data(t1, t2)\r\n selected = source.selected.indices\r\n if selected:\r\n data = data.iloc[selected, :]\r\n update_stats(data, t1, t2)\r\n\r\n source.on_change('selected', selection_change)\r\n\r\nThe code never prints 'lol'. \n", "before_files": [{"content": "''' Create a simple stocks correlation dashboard.\n\nChoose stocks to compare in the drop down widgets, and make selections\non the plots to update the summary and histograms accordingly.\n\n.. note::\n Running this example requires downloading sample data. See\n the included `README`_ for more information.\n\nUse the ``bokeh serve`` command to run the example by executing:\n\n bokeh serve stocks\n\nat your command prompt. Then navigate to the URL\n\n http://localhost:5006/stocks\n\n.. _README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md\n\n'''\ntry:\n from functools import lru_cache\nexcept ImportError:\n # Python 2 does stdlib does not have lru_cache so let's just\n # create a dummy decorator to avoid crashing\n print (\"WARNING: Cache for this example is available on Python 3 only.\")\n def lru_cache():\n def dec(f):\n def _(*args, **kws):\n return f(*args, **kws)\n return _\n return dec\n\nfrom os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, column\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import PreText, Select\nfrom bokeh.plotting import figure\n\nDATA_DIR = join(dirname(__file__), 'daily')\n\nDEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']\n\ndef nix(val, lst):\n return [x for x in lst if x != val]\n\n@lru_cache()\ndef load_ticker(ticker):\n fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())\n data = pd.read_csv(fname, header=None, parse_dates=['date'],\n names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])\n data = data.set_index('date')\n return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})\n\n@lru_cache()\ndef get_data(t1, t2):\n df1 = load_ticker(t1)\n df2 = load_ticker(t2)\n data = pd.concat([df1, df2], axis=1)\n data = data.dropna()\n data['t1'] = data[t1]\n data['t2'] = data[t2]\n data['t1_returns'] = data[t1+'_returns']\n data['t2_returns'] = data[t2+'_returns']\n return data\n\n# set up widgets\n\nstats = PreText(text='', width=500)\nticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))\nticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))\n\n# set up plots\n\nsource = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))\nsource_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))\ntools = 'pan,wheel_zoom,xbox_select,reset'\n\ncorr = figure(plot_width=350, plot_height=350,\n 
tools='pan,wheel_zoom,box_select,reset')\ncorr.circle('t1_returns', 't2_returns', size=2, source=source,\n selection_color=\"orange\", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)\n\nts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag=\"xbox_select\")\nts1.line('date', 't1', source=source_static)\nts1.circle('date', 't1', size=1, source=source, color=None, selection_color=\"orange\")\n\nts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag=\"xbox_select\")\nts2.x_range = ts1.x_range\nts2.line('date', 't2', source=source_static)\nts2.circle('date', 't2', size=1, source=source, color=None, selection_color=\"orange\")\n\n# set up callbacks\n\ndef ticker1_change(attrname, old, new):\n ticker2.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef ticker2_change(attrname, old, new):\n ticker1.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef update(selected=None):\n t1, t2 = ticker1.value, ticker2.value\n\n data = get_data(t1, t2)\n source.data = source.from_df(data[['t1', 't2', 't1_returns', 't2_returns']])\n source_static.data = source.data\n\n update_stats(data, t1, t2)\n\n corr.title.text = '%s returns vs. %s returns' % (t1, t2)\n ts1.title.text, ts2.title.text = t1, t2\n\ndef update_stats(data, t1, t2):\n stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())\n\nticker1.on_change('value', ticker1_change)\nticker2.on_change('value', ticker2_change)\n\ndef selection_change(attrname, old, new):\n t1, t2 = ticker1.value, ticker2.value\n data = get_data(t1, t2)\n selected = source.selected.indices\n if selected:\n data = data.iloc[selected, :]\n update_stats(data, t1, t2)\n\nsource.on_change('selected', selection_change)\n\n# set up layout\nwidgets = column(ticker1, ticker2, stats)\nmain_row = row(corr, widgets)\nseries = column(ts1, ts2)\nlayout = column(main_row, series)\n\n# initialize\nupdate()\n\ncurdoc().add_root(layout)\ncurdoc().title = \"Stocks\"\n", "path": "examples/app/stocks/main.py"}], "after_files": [{"content": "''' Create a simple stocks correlation dashboard.\n\nChoose stocks to compare in the drop down widgets, and make selections\non the plots to update the summary and histograms accordingly.\n\n.. note::\n Running this example requires downloading sample data. See\n the included `README`_ for more information.\n\nUse the ``bokeh serve`` command to run the example by executing:\n\n bokeh serve stocks\n\nat your command prompt. Then navigate to the URL\n\n http://localhost:5006/stocks\n\n.. 
_README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md\n\n'''\ntry:\n from functools import lru_cache\nexcept ImportError:\n # Python 2 does stdlib does not have lru_cache so let's just\n # create a dummy decorator to avoid crashing\n print (\"WARNING: Cache for this example is available on Python 3 only.\")\n def lru_cache():\n def dec(f):\n def _(*args, **kws):\n return f(*args, **kws)\n return _\n return dec\n\nfrom os.path import dirname, join\n\nimport pandas as pd\n\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, column\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models.widgets import PreText, Select\nfrom bokeh.plotting import figure\n\nDATA_DIR = join(dirname(__file__), 'daily')\n\nDEFAULT_TICKERS = ['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']\n\ndef nix(val, lst):\n return [x for x in lst if x != val]\n\n@lru_cache()\ndef load_ticker(ticker):\n fname = join(DATA_DIR, 'table_%s.csv' % ticker.lower())\n data = pd.read_csv(fname, header=None, parse_dates=['date'],\n names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'])\n data = data.set_index('date')\n return pd.DataFrame({ticker: data.c, ticker+'_returns': data.c.diff()})\n\n@lru_cache()\ndef get_data(t1, t2):\n df1 = load_ticker(t1)\n df2 = load_ticker(t2)\n data = pd.concat([df1, df2], axis=1)\n data = data.dropna()\n data['t1'] = data[t1]\n data['t2'] = data[t2]\n data['t1_returns'] = data[t1+'_returns']\n data['t2_returns'] = data[t2+'_returns']\n return data\n\n# set up widgets\n\nstats = PreText(text='', width=500)\nticker1 = Select(value='AAPL', options=nix('GOOG', DEFAULT_TICKERS))\nticker2 = Select(value='GOOG', options=nix('AAPL', DEFAULT_TICKERS))\n\n# set up plots\n\nsource = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))\nsource_static = ColumnDataSource(data=dict(date=[], t1=[], t2=[], t1_returns=[], t2_returns=[]))\ntools = 'pan,wheel_zoom,xbox_select,reset'\n\ncorr = figure(plot_width=350, plot_height=350,\n tools='pan,wheel_zoom,box_select,reset')\ncorr.circle('t1_returns', 't2_returns', size=2, source=source,\n selection_color=\"orange\", alpha=0.6, nonselection_alpha=0.1, selection_alpha=0.4)\n\nts1 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag=\"xbox_select\")\nts1.line('date', 't1', source=source_static)\nts1.circle('date', 't1', size=1, source=source, color=None, selection_color=\"orange\")\n\nts2 = figure(plot_width=900, plot_height=200, tools=tools, x_axis_type='datetime', active_drag=\"xbox_select\")\nts2.x_range = ts1.x_range\nts2.line('date', 't2', source=source_static)\nts2.circle('date', 't2', size=1, source=source, color=None, selection_color=\"orange\")\n\n# set up callbacks\n\ndef ticker1_change(attrname, old, new):\n ticker2.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef ticker2_change(attrname, old, new):\n ticker1.options = nix(new, DEFAULT_TICKERS)\n update()\n\ndef update(selected=None):\n t1, t2 = ticker1.value, ticker2.value\n\n data = get_data(t1, t2)\n source.data = source.from_df(data[['t1', 't2', 't1_returns', 't2_returns']])\n source_static.data = source.data\n\n update_stats(data, t1, t2)\n\n corr.title.text = '%s returns vs. 
%s returns' % (t1, t2)\n ts1.title.text, ts2.title.text = t1, t2\n\ndef update_stats(data, t1, t2):\n stats.text = str(data[[t1, t2, t1+'_returns', t2+'_returns']].describe())\n\nticker1.on_change('value', ticker1_change)\nticker2.on_change('value', ticker2_change)\n\ndef selection_change(attrname, old, new):\n t1, t2 = ticker1.value, ticker2.value\n data = get_data(t1, t2)\n selected = source.selected.indices\n if selected:\n data = data.iloc[selected, :]\n update_stats(data, t1, t2)\n\nsource.selected.on_change('indices', selection_change)\n\n# set up layout\nwidgets = column(ticker1, ticker2, stats)\nmain_row = row(corr, widgets)\nseries = column(ts1, ts2)\nlayout = column(main_row, series)\n\n# initialize\nupdate()\n\ncurdoc().add_root(layout)\ncurdoc().title = \"Stocks\"\n", "path": "examples/app/stocks/main.py"}]} | 2,036 | 108 |