problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_30178 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-965 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ku-ring-gai Council doesn't work if there is a house number 1A
works - 1A Latona Street PYMBLE 2073
doesn't work - 1 Latona Street PYMBLE 2073
Both exist
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py`
Content:
```
1 import datetime
2 import json
3 import requests
4
5 from bs4 import BeautifulSoup
6 from requests.utils import requote_uri
7 from waste_collection_schedule import Collection
8
9 TITLE = "Ku-ring-gai Council"
10 DESCRIPTION = "Source for Ku-ring-gai Council waste collection."
11 URL = "https://www.krg.nsw.gov.au"
12 TEST_CASES = {
13 "randomHouse": {
14 "post_code": "2070",
15 "suburb": "LINDFIELD",
16 "street_name": "Wolseley Road",
17 "street_number": "42",
18 },
19 "randomAppartment": {
20 "post_code": "2074",
21 "suburb": "WARRAWEE",
22 "street_name": "Cherry Street",
23 "street_number": "4/9",
24 },
25 "randomMultiunit": {
26 "post_code": "2075",
27 "suburb": "ST IVES",
28 "street_name": "Kitchener Street",
29 "street_number": "99/2-8",
30 },
31 }
32
33 API_URLS = {
34 "session":"https://www.krg.nsw.gov.au" ,
35 "search": "https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}",
36 "schedule": "https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU",
37 }
38
39 HEADERS = {
40 "user-agent": "Mozilla/5.0",
41 }
42
43 ICON_MAP = {
44 "GeneralWaste": "mdi:trash-can",
45 "Recycling": "mdi:recycle",
46 "GreenWaste": "mdi:leaf",
47 }
48
49 ROUNDS = {
50 "GeneralWaste": "General Waste",
51 "Recycling": "Recycling",
52 "GreenWaste": "Green Waste",
53 }
54
55 # _LOGGER = logging.getLogger(__name__)
56
57
58 class Source:
59 def __init__(
60 self, post_code: str, suburb: str, street_name: str, street_number: str
61 ):
62 self.post_code = post_code
63 self.suburb = suburb.upper()
64 self.street_name = street_name
65 self.street_number = street_number
66
67 def fetch(self):
68
69 locationId = 0
70
71 # 'collection' api call seems to require an ASP.Net_sessionID, so obtain the relevant cookie
72 s = requests.Session()
73 q = requote_uri(str(API_URLS["session"]))
74 r0 = s.get(q, headers = HEADERS)
75
76 # Do initial address search
77 address = "{} {}, {} NSW {}".format(self.street_number, self.street_name, self.suburb, self.post_code)
78 q = requote_uri(str(API_URLS["search"]).format(address))
79 r1 = s.get(q, headers = HEADERS)
80 data = json.loads(r1.text)
81
82 # Find the geolocation for the address
83 for item in data["Items"]:
84 if address in item['AddressSingleLine']:
85 locationId = item["Id"]
86 break
87
88 if locationId == 0:
89 return []
90
91 # Retrieve the upcoming collections for location
92 q = requote_uri(str(API_URLS["schedule"]).format(locationId))
93 r2 = s.get(q, headers = HEADERS)
94 data = json.loads(r2.text)
95 responseContent = data["responseContent"]
96
97 soup = BeautifulSoup(responseContent, "html.parser")
98 services = soup.find_all("article")
99
100 entries = []
101
102 for item in services:
103 waste_type = item.find('h3').text
104 date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), "%a %d/%m/%Y").date()
105 entries.append(
106 Collection(
107 date = date,
108 # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste
109 t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste
110 icon=ICON_MAP.get(waste_type),
111 )
112 )
113
114 return entries
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py
@@ -28,8 +28,21 @@
"street_name": "Kitchener Street",
"street_number": "99/2-8",
},
+ "1 Latona St": {
+ "post_code": "2073",
+ "suburb": "PYMBLE",
+ "street_name": "Latona Street",
+ "street_number": "1",
+ },
+ "1A Latona St": {
+ "post_code": "2073",
+ "suburb": "PYMBLE",
+ "street_name": "Latona Street",
+ "street_number": "1A",
+ },
}
+
API_URLS = {
"session":"https://www.krg.nsw.gov.au" ,
"search": "https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}",
@@ -77,16 +90,12 @@
address = "{} {}, {} NSW {}".format(self.street_number, self.street_name, self.suburb, self.post_code)
q = requote_uri(str(API_URLS["search"]).format(address))
r1 = s.get(q, headers = HEADERS)
- data = json.loads(r1.text)
+ data = json.loads(r1.text)["Items"]
# Find the geolocation for the address
- for item in data["Items"]:
+ for item in data:
if address in item['AddressSingleLine']:
locationId = item["Id"]
- break
-
- if locationId == 0:
- return []
# Retrieve the upcoming collections for location
q = requote_uri(str(API_URLS["schedule"]).format(locationId))
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py\n@@ -28,8 +28,21 @@\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n+ \"1 Latona St\": {\n+ \"post_code\": \"2073\",\n+ \"suburb\": \"PYMBLE\",\n+ \"street_name\": \"Latona Street\",\n+ \"street_number\": \"1\",\n+ },\n+ \"1A Latona St\": {\n+ \"post_code\": \"2073\",\n+ \"suburb\": \"PYMBLE\",\n+ \"street_name\": \"Latona Street\",\n+ \"street_number\": \"1A\",\n+ },\n }\n \n+\n API_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n@@ -77,16 +90,12 @@\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n- data = json.loads(r1.text)\n+ data = json.loads(r1.text)[\"Items\"]\n \n # Find the geolocation for the address\n- for item in data[\"Items\"]:\n+ for item in data:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n- break\n-\n- if locationId == 0:\n- return []\n \n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n", "issue": "Ku-ring-gai Council doesn't work if there is a house number 1A\nworks - 1A Latona Street PYMBLE 2073\r\ndoesn't work - 1 Latona Street PYMBLE 2073\r\n\r\nBoth exist\n", "before_files": [{"content": "import datetime\nimport json\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Ku-ring-gai Council\"\nDESCRIPTION = \"Source for Ku-ring-gai Council waste collection.\"\nURL = \"https://www.krg.nsw.gov.au\"\nTEST_CASES = {\n \"randomHouse\": {\n \"post_code\": \"2070\",\n \"suburb\": \"LINDFIELD\",\n \"street_name\": \"Wolseley Road\",\n \"street_number\": \"42\",\n },\n \"randomAppartment\": {\n \"post_code\": \"2074\",\n \"suburb\": \"WARRAWEE\",\n \"street_name\": \"Cherry Street\",\n \"street_number\": \"4/9\",\n },\n \"randomMultiunit\": {\n \"post_code\": \"2075\",\n \"suburb\": \"ST IVES\",\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n}\n\nAPI_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"schedule\": \"https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\n\nICON_MAP = {\n \"GeneralWaste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"GreenWaste\": \"mdi:leaf\",\n}\n\nROUNDS = {\n \"GeneralWaste\": \"General Waste\",\n \"Recycling\": \"Recycling\",\n \"GreenWaste\": \"Green Waste\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb.upper()\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n\n locationId = 0\n\n # 'collection' api call 
seems to require an ASP.Net_sessionID, so obtain the relevant cookie\n s = requests.Session()\n q = requote_uri(str(API_URLS[\"session\"]))\n r0 = s.get(q, headers = HEADERS)\n\n # Do initial address search\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n data = json.loads(r1.text)\n\n # Find the geolocation for the address\n for item in data[\"Items\"]:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n break\n\n if locationId == 0:\n return []\n\n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n r2 = s.get(q, headers = HEADERS)\n data = json.loads(r2.text)\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"article\")\n \n entries = []\n\n for item in services:\n waste_type = item.find('h3').text\n date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), \"%a %d/%m/%Y\").date()\n entries.append(\n Collection(\n date = date,\n # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste \n t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py"}], "after_files": [{"content": "import datetime\nimport json\nimport requests\n\nfrom bs4 import BeautifulSoup\nfrom requests.utils import requote_uri\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Ku-ring-gai Council\"\nDESCRIPTION = \"Source for Ku-ring-gai Council waste collection.\"\nURL = \"https://www.krg.nsw.gov.au\"\nTEST_CASES = {\n \"randomHouse\": {\n \"post_code\": \"2070\",\n \"suburb\": \"LINDFIELD\",\n \"street_name\": \"Wolseley Road\",\n \"street_number\": \"42\",\n },\n \"randomAppartment\": {\n \"post_code\": \"2074\",\n \"suburb\": \"WARRAWEE\",\n \"street_name\": \"Cherry Street\",\n \"street_number\": \"4/9\",\n },\n \"randomMultiunit\": {\n \"post_code\": \"2075\",\n \"suburb\": \"ST IVES\",\n \"street_name\": \"Kitchener Street\",\n \"street_number\": \"99/2-8\",\n },\n \"1 Latona St\": {\n \"post_code\": \"2073\",\n \"suburb\": \"PYMBLE\",\n \"street_name\": \"Latona Street\",\n \"street_number\": \"1\",\n },\n \"1A Latona St\": {\n \"post_code\": \"2073\",\n \"suburb\": \"PYMBLE\",\n \"street_name\": \"Latona Street\",\n \"street_number\": \"1A\",\n },\n}\n\n\nAPI_URLS = {\n \"session\":\"https://www.krg.nsw.gov.au\" ,\n \"search\": \"https://www.krg.nsw.gov.au/api/v1/myarea/search?keywords={}\",\n \"schedule\": \"https://www.krg.nsw.gov.au/ocapi/Public/myarea/wasteservices?geolocationid={}&ocsvclang=en-AU\",\n}\n\nHEADERS = {\n \"user-agent\": \"Mozilla/5.0\",\n}\n\nICON_MAP = {\n \"GeneralWaste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"GreenWaste\": \"mdi:leaf\",\n}\n\nROUNDS = {\n \"GeneralWaste\": \"General Waste\",\n \"Recycling\": \"Recycling\",\n \"GreenWaste\": \"Green Waste\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self, post_code: str, suburb: str, street_name: str, street_number: str\n ):\n self.post_code = post_code\n self.suburb = suburb.upper()\n self.street_name = street_name\n self.street_number = street_number\n\n def fetch(self):\n\n locationId = 0\n\n # 
'collection' api call seems to require an ASP.Net_sessionID, so obtain the relevant cookie\n s = requests.Session()\n q = requote_uri(str(API_URLS[\"session\"]))\n r0 = s.get(q, headers = HEADERS)\n\n # Do initial address search\n address = \"{} {}, {} NSW {}\".format(self.street_number, self.street_name, self.suburb, self.post_code)\n q = requote_uri(str(API_URLS[\"search\"]).format(address))\n r1 = s.get(q, headers = HEADERS)\n data = json.loads(r1.text)[\"Items\"]\n\n # Find the geolocation for the address\n for item in data:\n if address in item['AddressSingleLine']:\n locationId = item[\"Id\"]\n\n # Retrieve the upcoming collections for location\n q = requote_uri(str(API_URLS[\"schedule\"]).format(locationId))\n r2 = s.get(q, headers = HEADERS)\n data = json.loads(r2.text)\n responseContent = data[\"responseContent\"]\n\n soup = BeautifulSoup(responseContent, \"html.parser\")\n services = soup.find_all(\"article\")\n \n entries = []\n\n for item in services:\n waste_type = item.find('h3').text\n date = datetime.datetime.strptime(item.find('div', {'class': 'next-service'}).text.strip(), \"%a %d/%m/%Y\").date()\n entries.append(\n Collection(\n date = date,\n # t=waste_type, # api returns GeneralWaste, Recycling, GreenWaste \n t = ROUNDS.get(waste_type), # returns user-friendly General Waste, Recycling, Green Waste\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/kuringgai_nsw_gov_au.py"}]} | 1,452 | 478 |
gh_patches_debug_15323 | rasdani/github-patches | git_diff | pyca__cryptography-7511 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cryptography packaging `ext_package` causes `_openssl` to be in `top_level.txt` in the wheel
this is extremely minor and I have an idea on how to work around it -- but it appears some of the distribution metadata is incorrect due to the way the cffi modules are built in `cryptography`.
here's a minimal example:
```console
$ pip download cryptography --no-deps ...
$ unzip -p cryptography-*.whl */top_level.txt
_openssl
cryptography
```
this file is _supposed_ to list the top level modules afaict and `_openssl` is not one of them. it appears that comes from this: https://github.com/pyca/cryptography/blob/7a4c612b0280a4a01df636f8d648b9c1a1dad6dc/src/_cffi_src/build_openssl.py#L73
I can probably work around this (I'm using this file to do some rudimentary testing of wheels) and regenerate `top_level.txt` in a different way (perhaps via `RECORD`)
I think this _might_ be a `setuptools` bug with `ext_package` -- it can be worked around like this:
```diff
diff --git a/setup.cfg b/setup.cfg
index f9710764d..d96bb2d4f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -43,7 +43,6 @@ zip_safe = False
package_dir =
=src
packages = find:
-ext_package = cryptography.hazmat.bindings
# `install_requires` must be kept in sync with `pyproject.toml`
install_requires =
cffi >=1.12
diff --git a/setup.py b/setup.py
index 946bb0f92..320994e69 100644
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ try:
],
rust_extensions=[
RustExtension(
- "_rust",
+ "cryptography.hazmat.bindings._rust",
"src/rust/Cargo.toml",
py_limited_api=True,
# Enable abi3 mode if we're not using PyPy.
diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py
index c5ab3cb3c..84637f090 100644
--- a/src/_cffi_src/build_openssl.py
+++ b/src/_cffi_src/build_openssl.py
@@ -70,7 +70,7 @@ def _extra_compile_args(platform):
ffi = build_ffi_for_binding(
- module_name="_openssl",
+ module_name="cryptography.hazmat.bindings._openssl",
module_prefix="_cffi_src.openssl.",
modules=[
# This goes first so we can define some cryptography-wide symbols.
```
```console
$ rm -rf build *.whl && pip wheel . --no-deps
...
$ unzip -p *.whl */top_level.txt
cryptography
$ unzip -l *.whl | grep '\.so'
1974696 2022-08-15 14:04 cryptography/hazmat/bindings/_openssl.abi3.so
3696800 2022-08-15 14:04 cryptography/hazmat/bindings/_rust.abi3.so
```
let me know if this would be worth a patch (though I'd totally understand if this is a "no improvement" / "upstream problem" issue)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 import os
8 import platform
9 import re
10 import shutil
11 import subprocess
12 import sys
13
14 from setuptools import setup
15
16 try:
17 from setuptools_rust import RustExtension
18 except ImportError:
19 print(
20 """
21 =============================DEBUG ASSISTANCE==========================
22 If you are seeing an error here please try the following to
23 successfully install cryptography:
24
25 Upgrade to the latest pip and try again. This will fix errors for most
26 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
27 =============================DEBUG ASSISTANCE==========================
28 """
29 )
30 raise
31
32
33 base_dir = os.path.dirname(__file__)
34 src_dir = os.path.join(base_dir, "src")
35
36 # When executing the setup.py, we need to be able to import ourselves, this
37 # means that we need to add the src/ directory to the sys.path.
38 sys.path.insert(0, src_dir)
39
40 try:
41 # See setup.cfg for most of the config metadata.
42 setup(
43 cffi_modules=[
44 "src/_cffi_src/build_openssl.py:ffi",
45 ],
46 rust_extensions=[
47 RustExtension(
48 "_rust",
49 "src/rust/Cargo.toml",
50 py_limited_api=True,
51 # Enable abi3 mode if we're not using PyPy.
52 features=(
53 []
54 if platform.python_implementation() == "PyPy"
55 else ["pyo3/abi3-py36"]
56 ),
57 rust_version=">=1.48.0",
58 )
59 ],
60 )
61 except: # noqa: E722
62 # Note: This is a bare exception that re-raises so that we don't interfere
63 # with anything the installation machinery might want to do. Because we
64 # print this for any exception this msg can appear (e.g. in verbose logs)
65 # even if there's no failure. For example, SetupRequirementsError is raised
66 # during PEP517 building and prints this text. setuptools raises SystemExit
67 # when compilation fails right now, but it's possible this isn't stable
68 # or a public API commitment so we'll remain ultra conservative.
69
70 import pkg_resources
71
72 print(
73 """
74 =============================DEBUG ASSISTANCE=============================
75 If you are seeing a compilation error please try the following steps to
76 successfully install cryptography:
77 1) Upgrade to the latest pip and try again. This will fix errors for most
78 users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip
79 2) Read https://cryptography.io/en/latest/installation/ for specific
80 instructions for your platform.
81 3) Check our frequently asked questions for more information:
82 https://cryptography.io/en/latest/faq/
83 4) Ensure you have a recent Rust toolchain installed:
84 https://cryptography.io/en/latest/installation/#rust
85 """
86 )
87 print(f" Python: {'.'.join(str(v) for v in sys.version_info[:3])}")
88 print(f" platform: {platform.platform()}")
89 for dist in ["pip", "setuptools", "setuptools_rust"]:
90 try:
91 version = pkg_resources.get_distribution(dist).version
92 except pkg_resources.DistributionNotFound:
93 version = "n/a"
94 print(f" {dist}: {version}")
95 version = "n/a"
96 if shutil.which("rustc") is not None:
97 try:
98 # If for any reason `rustc --version` fails, silently ignore it
99 rustc_output = subprocess.run(
100 ["rustc", "--version"],
101 capture_output=True,
102 timeout=0.5,
103 encoding="utf8",
104 check=True,
105 ).stdout
106 version = re.sub("^rustc ", "", rustc_output.strip())
107 except subprocess.SubprocessError:
108 pass
109 print(f" rustc: {version}")
110
111 print(
112 """\
113 =============================DEBUG ASSISTANCE=============================
114 """
115 )
116 raise
117
```
Path: `src/_cffi_src/build_openssl.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 import os
7 import sys
8 from distutils import dist
9 from distutils.ccompiler import get_default_compiler
10 from distutils.command.config import config
11
12 from _cffi_src.utils import build_ffi_for_binding, compiler_type
13
14
15 def _get_openssl_libraries(platform):
16 if os.environ.get("CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS", None):
17 return []
18 # OpenSSL goes by a different library name on different operating systems.
19 if platform == "win32" and compiler_type() == "msvc":
20 return [
21 "libssl",
22 "libcrypto",
23 "advapi32",
24 "crypt32",
25 "gdi32",
26 "user32",
27 "ws2_32",
28 ]
29 else:
30 # darwin, linux, mingw all use this path
31 # In some circumstances, the order in which these libs are
32 # specified on the linker command-line is significant;
33 # libssl must come before libcrypto
34 # (https://marc.info/?l=openssl-users&m=135361825921871)
35 # -lpthread required due to usage of pthread an potential
36 # existance of a static part containing e.g. pthread_atfork
37 # (https://github.com/pyca/cryptography/issues/5084)
38 if sys.platform == "zos":
39 return ["ssl", "crypto"]
40 else:
41 return ["ssl", "crypto", "pthread"]
42
43
44 def _extra_compile_args(platform):
45 """
46 We set -Wconversion args here so that we only do Wconversion checks on the
47 code we're compiling and not on cffi itself (as passing -Wconversion in
48 CFLAGS would do). We set no error on sign conversion because some
49 function signatures in LibreSSL differ from OpenSSL have changed on long
50 vs. unsigned long in the past. Since that isn't a precision issue we don't
51 care.
52 """
53 # make sure the compiler used supports the flags to be added
54 is_gcc = False
55 if get_default_compiler() == "unix":
56 d = dist.Distribution()
57 cmd = config(d)
58 cmd._check_compiler()
59 is_gcc = (
60 "gcc" in cmd.compiler.compiler[0]
61 or "clang" in cmd.compiler.compiler[0]
62 )
63 if is_gcc or not (
64 platform in ["win32", "hp-ux11", "sunos5"]
65 or platform.startswith("aix")
66 ):
67 return ["-Wconversion", "-Wno-error=sign-conversion"]
68 else:
69 return []
70
71
72 ffi = build_ffi_for_binding(
73 module_name="_openssl",
74 module_prefix="_cffi_src.openssl.",
75 modules=[
76 # This goes first so we can define some cryptography-wide symbols.
77 "cryptography",
78 # Provider comes early as well so we define OSSL_LIB_CTX
79 "provider",
80 "asn1",
81 "bignum",
82 "bio",
83 "cmac",
84 "conf",
85 "crypto",
86 "dh",
87 "dsa",
88 "ec",
89 "ecdh",
90 "ecdsa",
91 "engine",
92 "err",
93 "evp",
94 "fips",
95 "hmac",
96 "nid",
97 "objects",
98 "opensslv",
99 "osrandom_engine",
100 "pem",
101 "pkcs12",
102 "rand",
103 "rsa",
104 "ssl",
105 "x509",
106 "x509name",
107 "x509v3",
108 "x509_vfy",
109 "pkcs7",
110 "callbacks",
111 ],
112 libraries=_get_openssl_libraries(sys.platform),
113 extra_compile_args=_extra_compile_args(sys.platform),
114 )
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
],
rust_extensions=[
RustExtension(
- "_rust",
+ "cryptography.hazmat.bindings._rust",
"src/rust/Cargo.toml",
py_limited_api=True,
# Enable abi3 mode if we're not using PyPy.
diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py
--- a/src/_cffi_src/build_openssl.py
+++ b/src/_cffi_src/build_openssl.py
@@ -70,7 +70,7 @@
ffi = build_ffi_for_binding(
- module_name="_openssl",
+ module_name="cryptography.hazmat.bindings._openssl",
module_prefix="_cffi_src.openssl.",
modules=[
# This goes first so we can define some cryptography-wide symbols.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n ],\n rust_extensions=[\n RustExtension(\n- \"_rust\",\n+ \"cryptography.hazmat.bindings._rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\ndiff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py\n--- a/src/_cffi_src/build_openssl.py\n+++ b/src/_cffi_src/build_openssl.py\n@@ -70,7 +70,7 @@\n \n \n ffi = build_ffi_for_binding(\n- module_name=\"_openssl\",\n+ module_name=\"cryptography.hazmat.bindings._openssl\",\n module_prefix=\"_cffi_src.openssl.\",\n modules=[\n # This goes first so we can define some cryptography-wide symbols.\n", "issue": "cryptography packaging `ext_package` causes `_openssl` to be in `top_level.txt` in the wheel\nthis is extremely minor and I have an idea on how to work around it -- but it appears some of the distribution metadata is incorrect due to the way the cffi modules are built in `cryptography`.\r\n\r\nhere's a minimal example:\r\n\r\n```console\r\n$ pip download cryptography --no-deps ...\r\n$ unzip -p cryptography-*.whl */top_level.txt \r\n_openssl\r\ncryptography\r\n```\r\n\r\nthis file is _supposed_ to list the top level modules afaict and `_openssl` is not one of them. it appears that comes from this: https://github.com/pyca/cryptography/blob/7a4c612b0280a4a01df636f8d648b9c1a1dad6dc/src/_cffi_src/build_openssl.py#L73\r\n\r\nI can probably work around this (I'm using this file to do some rudimentary testing of wheels) and regenerate `top_level.txt` in a different way (perhaps via `RECORD`)\r\n\r\nI think this _might_ be a `setuptools` bug with `ext_package` -- it can be worked around like this:\r\n\r\n```diff\r\ndiff --git a/setup.cfg b/setup.cfg\r\nindex f9710764d..d96bb2d4f 100644\r\n--- a/setup.cfg\r\n+++ b/setup.cfg\r\n@@ -43,7 +43,6 @@ zip_safe = False\r\n package_dir =\r\n =src\r\n packages = find:\r\n-ext_package = cryptography.hazmat.bindings\r\n # `install_requires` must be kept in sync with `pyproject.toml`\r\n install_requires =\r\n cffi >=1.12\r\ndiff --git a/setup.py b/setup.py\r\nindex 946bb0f92..320994e69 100644\r\n--- a/setup.py\r\n+++ b/setup.py\r\n@@ -45,7 +45,7 @@ try:\r\n ],\r\n rust_extensions=[\r\n RustExtension(\r\n- \"_rust\",\r\n+ \"cryptography.hazmat.bindings._rust\",\r\n \"src/rust/Cargo.toml\",\r\n py_limited_api=True,\r\n # Enable abi3 mode if we're not using PyPy.\r\ndiff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py\r\nindex c5ab3cb3c..84637f090 100644\r\n--- a/src/_cffi_src/build_openssl.py\r\n+++ b/src/_cffi_src/build_openssl.py\r\n@@ -70,7 +70,7 @@ def _extra_compile_args(platform):\r\n \r\n \r\n ffi = build_ffi_for_binding(\r\n- module_name=\"_openssl\",\r\n+ module_name=\"cryptography.hazmat.bindings._openssl\",\r\n module_prefix=\"_cffi_src.openssl.\",\r\n modules=[\r\n # This goes first so we can define some cryptography-wide symbols.\r\n```\r\n\r\n```console\r\n$ rm -rf build *.whl && pip wheel . 
--no-deps\r\n...\r\n$ unzip -p *.whl */top_level.txt\r\ncryptography\r\n$ unzip -l *.whl | grep '\\.so'\r\n 1974696 2022-08-15 14:04 cryptography/hazmat/bindings/_openssl.abi3.so\r\n 3696800 2022-08-15 14:04 cryptography/hazmat/bindings/_rust.abi3.so\r\n```\r\n\r\nlet me know if this would be worth a patch (though I'd totally understand if this is a \"no improvement\" / \"upstream problem\" issue)\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import RustExtension\nexcept ImportError:\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE==========================\n If you are seeing an error here please try the following to\n successfully install cryptography:\n\n Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n =============================DEBUG ASSISTANCE==========================\n \"\"\"\n )\n raise\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\ntry:\n # See setup.cfg for most of the config metadata.\n setup(\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n ],\n rust_extensions=[\n RustExtension(\n \"_rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\n features=(\n []\n if platform.python_implementation() == \"PyPy\"\n else [\"pyo3/abi3-py36\"]\n ),\n rust_version=\">=1.48.0\",\n )\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n\n import pkg_resources\n\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. 
See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation/ for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq/\n 4) Ensure you have a recent Rust toolchain installed:\n https://cryptography.io/en/latest/installation/#rust\n \"\"\"\n )\n print(f\" Python: {'.'.join(str(v) for v in sys.version_info[:3])}\")\n print(f\" platform: {platform.platform()}\")\n for dist in [\"pip\", \"setuptools\", \"setuptools_rust\"]:\n try:\n version = pkg_resources.get_distribution(dist).version\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n version = \"n/a\"\n if shutil.which(\"rustc\") is not None:\n try:\n # If for any reason `rustc --version` fails, silently ignore it\n rustc_output = subprocess.run(\n [\"rustc\", \"--version\"],\n capture_output=True,\n timeout=0.5,\n encoding=\"utf8\",\n check=True,\n ).stdout\n version = re.sub(\"^rustc \", \"\", rustc_output.strip())\n except subprocess.SubprocessError:\n pass\n print(f\" rustc: {version}\")\n\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport sys\nfrom distutils import dist\nfrom distutils.ccompiler import get_default_compiler\nfrom distutils.command.config import config\n\nfrom _cffi_src.utils import build_ffi_for_binding, compiler_type\n\n\ndef _get_openssl_libraries(platform):\n if os.environ.get(\"CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS\", None):\n return []\n # OpenSSL goes by a different library name on different operating systems.\n if platform == \"win32\" and compiler_type() == \"msvc\":\n return [\n \"libssl\",\n \"libcrypto\",\n \"advapi32\",\n \"crypt32\",\n \"gdi32\",\n \"user32\",\n \"ws2_32\",\n ]\n else:\n # darwin, linux, mingw all use this path\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (https://marc.info/?l=openssl-users&m=135361825921871)\n # -lpthread required due to usage of pthread an potential\n # existance of a static part containing e.g. pthread_atfork\n # (https://github.com/pyca/cryptography/issues/5084)\n if sys.platform == \"zos\":\n return [\"ssl\", \"crypto\"]\n else:\n return [\"ssl\", \"crypto\", \"pthread\"]\n\n\ndef _extra_compile_args(platform):\n \"\"\"\n We set -Wconversion args here so that we only do Wconversion checks on the\n code we're compiling and not on cffi itself (as passing -Wconversion in\n CFLAGS would do). We set no error on sign conversion because some\n function signatures in LibreSSL differ from OpenSSL have changed on long\n vs. unsigned long in the past. 
Since that isn't a precision issue we don't\n care.\n \"\"\"\n # make sure the compiler used supports the flags to be added\n is_gcc = False\n if get_default_compiler() == \"unix\":\n d = dist.Distribution()\n cmd = config(d)\n cmd._check_compiler()\n is_gcc = (\n \"gcc\" in cmd.compiler.compiler[0]\n or \"clang\" in cmd.compiler.compiler[0]\n )\n if is_gcc or not (\n platform in [\"win32\", \"hp-ux11\", \"sunos5\"]\n or platform.startswith(\"aix\")\n ):\n return [\"-Wconversion\", \"-Wno-error=sign-conversion\"]\n else:\n return []\n\n\nffi = build_ffi_for_binding(\n module_name=\"_openssl\",\n module_prefix=\"_cffi_src.openssl.\",\n modules=[\n # This goes first so we can define some cryptography-wide symbols.\n \"cryptography\",\n # Provider comes early as well so we define OSSL_LIB_CTX\n \"provider\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"fips\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\",\n \"pkcs7\",\n \"callbacks\",\n ],\n libraries=_get_openssl_libraries(sys.platform),\n extra_compile_args=_extra_compile_args(sys.platform),\n)\n", "path": "src/_cffi_src/build_openssl.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nimport os\nimport platform\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import RustExtension\nexcept ImportError:\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE==========================\n If you are seeing an error here please try the following to\n successfully install cryptography:\n\n Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n =============================DEBUG ASSISTANCE==========================\n \"\"\"\n )\n raise\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\ntry:\n # See setup.cfg for most of the config metadata.\n setup(\n cffi_modules=[\n \"src/_cffi_src/build_openssl.py:ffi\",\n ],\n rust_extensions=[\n RustExtension(\n \"cryptography.hazmat.bindings._rust\",\n \"src/rust/Cargo.toml\",\n py_limited_api=True,\n # Enable abi3 mode if we're not using PyPy.\n features=(\n []\n if platform.python_implementation() == \"PyPy\"\n else [\"pyo3/abi3-py36\"]\n ),\n rust_version=\">=1.48.0\",\n )\n ],\n )\nexcept: # noqa: E722\n # Note: This is a bare exception that re-raises so that we don't interfere\n # with anything the installation machinery might want to do. Because we\n # print this for any exception this msg can appear (e.g. in verbose logs)\n # even if there's no failure. For example, SetupRequirementsError is raised\n # during PEP517 building and prints this text. 
setuptools raises SystemExit\n # when compilation fails right now, but it's possible this isn't stable\n # or a public API commitment so we'll remain ultra conservative.\n\n import pkg_resources\n\n print(\n \"\"\"\n =============================DEBUG ASSISTANCE=============================\n If you are seeing a compilation error please try the following steps to\n successfully install cryptography:\n 1) Upgrade to the latest pip and try again. This will fix errors for most\n users. See: https://pip.pypa.io/en/stable/installing/#upgrading-pip\n 2) Read https://cryptography.io/en/latest/installation/ for specific\n instructions for your platform.\n 3) Check our frequently asked questions for more information:\n https://cryptography.io/en/latest/faq/\n 4) Ensure you have a recent Rust toolchain installed:\n https://cryptography.io/en/latest/installation/#rust\n \"\"\"\n )\n print(f\" Python: {'.'.join(str(v) for v in sys.version_info[:3])}\")\n print(f\" platform: {platform.platform()}\")\n for dist in [\"pip\", \"setuptools\", \"setuptools_rust\"]:\n try:\n version = pkg_resources.get_distribution(dist).version\n except pkg_resources.DistributionNotFound:\n version = \"n/a\"\n print(f\" {dist}: {version}\")\n version = \"n/a\"\n if shutil.which(\"rustc\") is not None:\n try:\n # If for any reason `rustc --version` fails, silently ignore it\n rustc_output = subprocess.run(\n [\"rustc\", \"--version\"],\n capture_output=True,\n timeout=0.5,\n encoding=\"utf8\",\n check=True,\n ).stdout\n version = re.sub(\"^rustc \", \"\", rustc_output.strip())\n except subprocess.SubprocessError:\n pass\n print(f\" rustc: {version}\")\n\n print(\n \"\"\"\\\n =============================DEBUG ASSISTANCE=============================\n \"\"\"\n )\n raise\n", "path": "setup.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport sys\nfrom distutils import dist\nfrom distutils.ccompiler import get_default_compiler\nfrom distutils.command.config import config\n\nfrom _cffi_src.utils import build_ffi_for_binding, compiler_type\n\n\ndef _get_openssl_libraries(platform):\n if os.environ.get(\"CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS\", None):\n return []\n # OpenSSL goes by a different library name on different operating systems.\n if platform == \"win32\" and compiler_type() == \"msvc\":\n return [\n \"libssl\",\n \"libcrypto\",\n \"advapi32\",\n \"crypt32\",\n \"gdi32\",\n \"user32\",\n \"ws2_32\",\n ]\n else:\n # darwin, linux, mingw all use this path\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (https://marc.info/?l=openssl-users&m=135361825921871)\n # -lpthread required due to usage of pthread an potential\n # existance of a static part containing e.g. pthread_atfork\n # (https://github.com/pyca/cryptography/issues/5084)\n if sys.platform == \"zos\":\n return [\"ssl\", \"crypto\"]\n else:\n return [\"ssl\", \"crypto\", \"pthread\"]\n\n\ndef _extra_compile_args(platform):\n \"\"\"\n We set -Wconversion args here so that we only do Wconversion checks on the\n code we're compiling and not on cffi itself (as passing -Wconversion in\n CFLAGS would do). We set no error on sign conversion because some\n function signatures in LibreSSL differ from OpenSSL have changed on long\n vs. unsigned long in the past. 
Since that isn't a precision issue we don't\n care.\n \"\"\"\n # make sure the compiler used supports the flags to be added\n is_gcc = False\n if get_default_compiler() == \"unix\":\n d = dist.Distribution()\n cmd = config(d)\n cmd._check_compiler()\n is_gcc = (\n \"gcc\" in cmd.compiler.compiler[0]\n or \"clang\" in cmd.compiler.compiler[0]\n )\n if is_gcc or not (\n platform in [\"win32\", \"hp-ux11\", \"sunos5\"]\n or platform.startswith(\"aix\")\n ):\n return [\"-Wconversion\", \"-Wno-error=sign-conversion\"]\n else:\n return []\n\n\nffi = build_ffi_for_binding(\n module_name=\"cryptography.hazmat.bindings._openssl\",\n module_prefix=\"_cffi_src.openssl.\",\n modules=[\n # This goes first so we can define some cryptography-wide symbols.\n \"cryptography\",\n # Provider comes early as well so we define OSSL_LIB_CTX\n \"provider\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"fips\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\",\n \"pkcs7\",\n \"callbacks\",\n ],\n libraries=_get_openssl_libraries(sys.platform),\n extra_compile_args=_extra_compile_args(sys.platform),\n)\n", "path": "src/_cffi_src/build_openssl.py"}]} | 3,359 | 213 |
gh_patches_debug_1788 | rasdani/github-patches | git_diff | beeware__toga-2041 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when displaying InfoDialog on iOS
### Describe the bug
InfoDialog could not be called from a Button on iOS
### Steps to reproduce
1. Run the `dialogs` example
2. Click on any of the buttons that display a dialog
3. See error:
```
Traceback (most recent call last):
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/rubicon/objc/api.py", line 334, in __call__
result = self.py_method(py_self, *args)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/widgets/button.py", line 22, in onPress_
self.interface.on_press(None)
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga/handlers.py", line 65, in _handler
result = handler(interface, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app/toolbox/app.py", line 624, in show_not_function
self.main_window.info_dialog(
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga/window.py", line 287, in info_dialog
self.factory.dialogs.InfoDialog(
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/dialogs.py", line 83, in __init__
super().__init__(interface, title, message, on_result=on_result)
File "/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/dialogs.py", line 31, in __init__
interface.window._impl.controller.presentViewController(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'MainWindow' object has no attribute 'controller'
```
### Expected behavior
InfoDialog should be displayed.
### Screenshots
I believe this is not needed
### Environment
- Operating System: iOS
- Python version: any
- Software versions:
- Briefcase: 0.3.15
- Toga: 5d9a077345d34018342944199212fb7861d8d772 (0.3.1+)
- ...
### Logs
```
```
### Additional context
This problem was introduced by #1969. The fix is relatively simple:
```
diff --git a/iOS/src/toga_iOS/dialogs.py b/iOS/src/toga_iOS/dialogs.py
index a0a8bce38..1448bf8fa 100644
--- a/iOS/src/toga_iOS/dialogs.py
+++ b/iOS/src/toga_iOS/dialogs.py
@@ -28,7 +28,7 @@ class AlertDialog(BaseDialog):
self.populate_dialog()
- interface.window._impl.controller.presentViewController(
+ interface.window._impl.native.rootViewController.presentViewController(
self.native,
animated=False,
completion=None,
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `iOS/src/toga_iOS/dialogs.py`
Content:
```
1 from abc import ABC
2
3 from rubicon.objc import Block
4 from rubicon.objc.runtime import objc_id
5
6 from toga_iOS.libs import (
7 UIAlertAction,
8 UIAlertActionStyle,
9 UIAlertController,
10 UIAlertControllerStyle,
11 )
12
13
14 class BaseDialog(ABC):
15 def __init__(self, interface):
16 self.interface = interface
17 self.interface.impl = self
18
19
20 class AlertDialog(BaseDialog):
21 def __init__(self, interface, title, message, on_result=None):
22 super().__init__(interface=interface)
23 self.on_result = on_result
24
25 self.native = UIAlertController.alertControllerWithTitle(
26 title, message=message, preferredStyle=UIAlertControllerStyle.Alert
27 )
28
29 self.populate_dialog()
30
31 interface.window._impl.controller.presentViewController(
32 self.native,
33 animated=False,
34 completion=None,
35 )
36
37 def populate_dialog(self, native):
38 pass
39
40 def response(self, value):
41 self.on_result(self, value)
42 self.interface.future.set_result(value)
43
44 def null_response(self, action: objc_id) -> None:
45 self.response(None)
46
47 def true_response(self, action: objc_id) -> None:
48 self.response(True)
49
50 def false_response(self, action: objc_id) -> None:
51 self.response(False)
52
53 def add_null_response_button(self, label):
54 self.native.addAction(
55 UIAlertAction.actionWithTitle(
56 label,
57 style=UIAlertActionStyle.Default,
58 handler=Block(self.null_response, None, objc_id),
59 )
60 )
61
62 def add_true_response_button(self, label):
63 self.native.addAction(
64 UIAlertAction.actionWithTitle(
65 label,
66 style=UIAlertActionStyle.Default,
67 handler=Block(self.true_response, None, objc_id),
68 )
69 )
70
71 def add_false_response_button(self, label):
72 self.native.addAction(
73 UIAlertAction.actionWithTitle(
74 label,
75 style=UIAlertActionStyle.Cancel,
76 handler=Block(self.false_response, None, objc_id),
77 )
78 )
79
80
81 class InfoDialog(AlertDialog):
82 def __init__(self, interface, title, message, on_result=None):
83 super().__init__(interface, title, message, on_result=on_result)
84
85 def populate_dialog(self):
86 self.add_null_response_button("OK")
87
88
89 class QuestionDialog(AlertDialog):
90 def __init__(self, interface, title, message, on_result=None):
91 super().__init__(interface, title, message, on_result=on_result)
92
93 def populate_dialog(self):
94 self.add_true_response_button("Yes")
95 self.add_false_response_button("No")
96
97
98 class ConfirmDialog(AlertDialog):
99 def __init__(self, interface, title, message, on_result=None):
100 super().__init__(interface, title, message, on_result=on_result)
101
102 def populate_dialog(self):
103 self.add_true_response_button("OK")
104 self.add_false_response_button("Cancel")
105
106
107 class ErrorDialog(AlertDialog):
108 def __init__(self, interface, title, message, on_result=None):
109 super().__init__(interface, title, message, on_result=on_result)
110
111 def populate_dialog(self):
112 self.add_null_response_button("OK")
113
114
115 class StackTraceDialog(BaseDialog):
116 def __init__(self, interface, title, message, on_result=None, **kwargs):
117 super().__init__(interface=interface)
118 interface.window.factory.not_implemented("Window.stack_trace_dialog()")
119
120
121 class SaveFileDialog(BaseDialog):
122 def __init__(
123 self,
124 interface,
125 title,
126 filename,
127 initial_directory,
128 file_types=None,
129 on_result=None,
130 ):
131 super().__init__(interface=interface)
132 interface.window.factory.not_implemented("Window.save_file_dialog()")
133
134
135 class OpenFileDialog(BaseDialog):
136 def __init__(
137 self,
138 interface,
139 title,
140 initial_directory,
141 file_types,
142 multiselect,
143 on_result=None,
144 ):
145 super().__init__(interface=interface)
146 interface.window.factory.not_implemented("Window.open_file_dialog()")
147
148
149 class SelectFolderDialog(BaseDialog):
150 def __init__(
151 self,
152 interface,
153 title,
154 initial_directory,
155 multiselect,
156 on_result=None,
157 ):
158 super().__init__(interface=interface)
159 interface.window.factory.not_implemented("Window.select_folder_dialog()")
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/iOS/src/toga_iOS/dialogs.py b/iOS/src/toga_iOS/dialogs.py
--- a/iOS/src/toga_iOS/dialogs.py
+++ b/iOS/src/toga_iOS/dialogs.py
@@ -28,7 +28,7 @@
self.populate_dialog()
- interface.window._impl.controller.presentViewController(
+ interface.window._impl.native.rootViewController.presentViewController(
self.native,
animated=False,
completion=None,
| {"golden_diff": "diff --git a/iOS/src/toga_iOS/dialogs.py b/iOS/src/toga_iOS/dialogs.py\n--- a/iOS/src/toga_iOS/dialogs.py\n+++ b/iOS/src/toga_iOS/dialogs.py\n@@ -28,7 +28,7 @@\n \n self.populate_dialog()\n \n- interface.window._impl.controller.presentViewController(\n+ interface.window._impl.native.rootViewController.presentViewController(\n self.native,\n animated=False,\n completion=None,\n", "issue": "Crash when displaying InfoDialog on iOS\n### Describe the bug\r\n\r\nInfoDialog could not be called from a Button on iOS\r\n\r\n### Steps to reproduce\r\n\r\n1. Run the `dialogs` example\r\n2. Click on any of the buttons that display a dialog \r\n3. See error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/rubicon/objc/api.py\", line 334, in __call__\r\n result = self.py_method(py_self, *args)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/widgets/button.py\", line 22, in onPress_\r\n self.interface.on_press(None)\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga/handlers.py\", line 65, in _handler\r\n result = handler(interface, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app/toolbox/app.py\", line 624, in show_not_function\r\n self.main_window.info_dialog(\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga/window.py\", line 287, in info_dialog\r\n self.factory.dialogs.InfoDialog(\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/dialogs.py\", line 83, in __init__\r\n super().__init__(interface, title, message, on_result=on_result)\r\n File \"/Users/carstenschmidt/Library/Developer/CoreSimulator/Devices/DDAB8D40-4B82-42E8-840A-DAC5BA41273C/data/Containers/Bundle/Application/4EBA5383-22B3-4BF3-B5A9-4E0DB9B3F918/ToolBox.app/app_packages/toga_iOS/dialogs.py\", line 31, in __init__\r\n interface.window._impl.controller.presentViewController(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: 'MainWindow' object has no attribute 'controller'\r\n```\r\n\r\n### Expected behavior\r\n\r\nInfoDialog should be displayed.\r\n\r\n### Screenshots\r\n\r\nI believe this is not needed\r\n\r\n### Environment\r\n\r\n- Operating System: iOS\r\n- Python version: any\r\n- Software versions:\r\n - Briefcase: 0.3.15\r\n - Toga: 5d9a077345d34018342944199212fb7861d8d772 (0.3.1+)\r\n - ...\r\n\r\n\r\n### Logs\r\n\r\n```\r\n\r\n```\r\n\r\n\r\n### Additional context\r\n\r\nThis problem was introduced by #1969. 
The fix is relatively simple:\r\n```\r\ndiff --git a/iOS/src/toga_iOS/dialogs.py b/iOS/src/toga_iOS/dialogs.py\r\nindex a0a8bce38..1448bf8fa 100644\r\n--- a/iOS/src/toga_iOS/dialogs.py\r\n+++ b/iOS/src/toga_iOS/dialogs.py\r\n@@ -28,7 +28,7 @@ class AlertDialog(BaseDialog):\r\n \r\n self.populate_dialog()\r\n \r\n- interface.window._impl.controller.presentViewController(\r\n+ interface.window._impl.native.rootViewController.presentViewController(\r\n self.native,\r\n animated=False,\r\n completion=None,\r\n```\n", "before_files": [{"content": "from abc import ABC\n\nfrom rubicon.objc import Block\nfrom rubicon.objc.runtime import objc_id\n\nfrom toga_iOS.libs import (\n UIAlertAction,\n UIAlertActionStyle,\n UIAlertController,\n UIAlertControllerStyle,\n)\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface.impl = self\n\n\nclass AlertDialog(BaseDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface=interface)\n self.on_result = on_result\n\n self.native = UIAlertController.alertControllerWithTitle(\n title, message=message, preferredStyle=UIAlertControllerStyle.Alert\n )\n\n self.populate_dialog()\n\n interface.window._impl.controller.presentViewController(\n self.native,\n animated=False,\n completion=None,\n )\n\n def populate_dialog(self, native):\n pass\n\n def response(self, value):\n self.on_result(self, value)\n self.interface.future.set_result(value)\n\n def null_response(self, action: objc_id) -> None:\n self.response(None)\n\n def true_response(self, action: objc_id) -> None:\n self.response(True)\n\n def false_response(self, action: objc_id) -> None:\n self.response(False)\n\n def add_null_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Default,\n handler=Block(self.null_response, None, objc_id),\n )\n )\n\n def add_true_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Default,\n handler=Block(self.true_response, None, objc_id),\n )\n )\n\n def add_false_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Cancel,\n handler=Block(self.false_response, None, objc_id),\n )\n )\n\n\nclass InfoDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_null_response_button(\"OK\")\n\n\nclass QuestionDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_true_response_button(\"Yes\")\n self.add_false_response_button(\"No\")\n\n\nclass ConfirmDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_true_response_button(\"OK\")\n self.add_false_response_button(\"Cancel\")\n\n\nclass ErrorDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_null_response_button(\"OK\")\n\n\nclass StackTraceDialog(BaseDialog):\n def __init__(self, interface, title, message, on_result=None, **kwargs):\n super().__init__(interface=interface)\n 
interface.window.factory.not_implemented(\"Window.stack_trace_dialog()\")\n\n\nclass SaveFileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types=None,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.save_file_dialog()\")\n\n\nclass OpenFileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiselect,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.open_file_dialog()\")\n\n\nclass SelectFolderDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n multiselect,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.select_folder_dialog()\")\n", "path": "iOS/src/toga_iOS/dialogs.py"}], "after_files": [{"content": "from abc import ABC\n\nfrom rubicon.objc import Block\nfrom rubicon.objc.runtime import objc_id\n\nfrom toga_iOS.libs import (\n UIAlertAction,\n UIAlertActionStyle,\n UIAlertController,\n UIAlertControllerStyle,\n)\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface.impl = self\n\n\nclass AlertDialog(BaseDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface=interface)\n self.on_result = on_result\n\n self.native = UIAlertController.alertControllerWithTitle(\n title, message=message, preferredStyle=UIAlertControllerStyle.Alert\n )\n\n self.populate_dialog()\n\n interface.window._impl.native.rootViewController.presentViewController(\n self.native,\n animated=False,\n completion=None,\n )\n\n def populate_dialog(self, native):\n pass\n\n def response(self, value):\n self.on_result(self, value)\n self.interface.future.set_result(value)\n\n def null_response(self, action: objc_id) -> None:\n self.response(None)\n\n def true_response(self, action: objc_id) -> None:\n self.response(True)\n\n def false_response(self, action: objc_id) -> None:\n self.response(False)\n\n def add_null_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Default,\n handler=Block(self.null_response, None, objc_id),\n )\n )\n\n def add_true_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Default,\n handler=Block(self.true_response, None, objc_id),\n )\n )\n\n def add_false_response_button(self, label):\n self.native.addAction(\n UIAlertAction.actionWithTitle(\n label,\n style=UIAlertActionStyle.Cancel,\n handler=Block(self.false_response, None, objc_id),\n )\n )\n\n\nclass InfoDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_null_response_button(\"OK\")\n\n\nclass QuestionDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_true_response_button(\"Yes\")\n self.add_false_response_button(\"No\")\n\n\nclass ConfirmDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_true_response_button(\"OK\")\n 
self.add_false_response_button(\"Cancel\")\n\n\nclass ErrorDialog(AlertDialog):\n def __init__(self, interface, title, message, on_result=None):\n super().__init__(interface, title, message, on_result=on_result)\n\n def populate_dialog(self):\n self.add_null_response_button(\"OK\")\n\n\nclass StackTraceDialog(BaseDialog):\n def __init__(self, interface, title, message, on_result=None, **kwargs):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.stack_trace_dialog()\")\n\n\nclass SaveFileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n filename,\n initial_directory,\n file_types=None,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.save_file_dialog()\")\n\n\nclass OpenFileDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiselect,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.open_file_dialog()\")\n\n\nclass SelectFolderDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n multiselect,\n on_result=None,\n ):\n super().__init__(interface=interface)\n interface.window.factory.not_implemented(\"Window.select_folder_dialog()\")\n", "path": "iOS/src/toga_iOS/dialogs.py"}]} | 2,759 | 105 |
gh_patches_debug_34136 | rasdani/github-patches | git_diff | catalyst-team__catalyst-1004 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lost parameters in logs_path/configs/_config.json
## 🐛 Bug Report
<!-- A clear and concise description of what the bug is. -->
After running a custom experiment - the parameters given in the config are not saved into /path/to/log/configs/_config.json which gives problems during model tracing using the function `trace_model_from_checkpoint`.
### How To Reproduce
Steps to reproduce the behavior:
1. Comment the line `from .experiment import Experiment` in `examples/cifar_simple/__init__.py` (otherwise you will get the error "AssertionError: Experiment is set both in code and config.")
2. `cd examples`
3. `catalyst-dl run --config ./cifar_simple/config_experiment.yml`
4. Then run `catalyst-dl trace ./logs/cifar_simple/` -> get error "TypeError: 'NoneType' object is not callable"
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<!--#### Code sample-->
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
#### Screenshots
<!-- If applicable, add screenshots to help explain your problem. -->
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Tracing doesn't give an error; parameters are written to _config.
### Environment
Please copy and paste the output from our environment collection script
```bash
Catalyst version: 20.11
PyTorch version: 1.7.0
Is debug build: Yes
CUDA used to build PyTorch: 10.2
TensorFlow version: N/A
TensorBoard version: 2.4.0
OS: Linux Mint 19.3 Tricia
GCC version: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
CMake version: version 3.10.2
Python version: 3.8
Is CUDA available: Yes
CUDA runtime version: Could not collect
GPU models and configuration: GPU 0: GeForce RTX 2070 SUPER
Nvidia driver version: 450.80.02
cuDNN version: Could not collect
Versions of relevant libraries:
[pip3] catalyst==20.11
[pip3] catalyst-codestyle==20.6.1
[pip3] catalyst-sphinx-theme==1.1.2
[pip3] efficientnet-pytorch==0.6.3
[pip3] numpy==1.19.4
[pip3] segmentation-models-pytorch==0.1.2
[pip3] tensorboard==2.4.0
[pip3] tensorboard-plugin-wit==1.7.0
[pip3] tensorboardX==2.1
[pip3] torch==1.7.0
[pip3] torchvision==0.8.1
[conda] catalyst 20.11 pypi_0 pypi
[conda] catalyst-codestyle 20.6.1 pypi_0 pypi
[conda] catalyst-sphinx-theme 1.1.2 pypi_0 pypi
[conda] efficientnet-pytorch 0.6.3 pypi_0 pypi
[conda] numpy 1.19.4 pypi_0 pypi
[conda] segmentation-models-pytorch 0.1.2 pypi_0 pypi
[conda] tensorboard 2.4.0 pypi_0 pypi
[conda] tensorboard-plugin-wit 1.7.0 pypi_0 pypi
[conda] tensorboardx 2.1 pypi_0 pypi
[conda] torch 1.7.0 pypi_0 pypi
[conda] torchvision 0.8.1 pypi_0 pypi
```
PS
- [x] I know, that I could [join Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `catalyst/utils/scripts.py`
Content:
```
1 from typing import Callable, Dict, Union
2 from importlib.util import module_from_spec, spec_from_file_location
3 import os
4 import pathlib
5 import shutil
6 import subprocess
7 import sys
8 import warnings
9
10 import torch
11 import torch.distributed
12
13 from catalyst.registry import EXPERIMENTS, RUNNERS
14 from catalyst.utils.distributed import (
15 get_distributed_env,
16 get_distributed_params,
17 )
18 from catalyst.utils.misc import get_utcnow_time
19
20
21 def import_module(expdir: Union[str, pathlib.Path]):
22 """
23 Imports python module by path.
24
25 Args:
26 expdir: path to python module.
27
28 Returns:
29 Imported module.
30 """
31 if not isinstance(expdir, pathlib.Path):
32 expdir = pathlib.Path(expdir)
33 sys.path.insert(0, str(expdir.absolute()))
34 sys.path.insert(0, os.path.dirname(str(expdir.absolute())))
35 s = spec_from_file_location(
36 expdir.name,
37 str(expdir.absolute() / "__init__.py"),
38 submodule_search_locations=[expdir.absolute()],
39 )
40 m = module_from_spec(s)
41 s.loader.exec_module(m)
42 sys.modules[expdir.name] = m
43 return m
44
45
46 def prepare_config_api_components(expdir: pathlib.Path, config: Dict):
47 """
48 Imports and create core Config API components - Experiment, Runner
49 and Config from ``expdir`` - experiment directory
50 and ``config`` - experiment config.
51
52 Args:
53 expdir: experiment directory path
54 config: dictionary with experiment Config
55
56 Returns:
57 Experiment, Runner, Config for Config API usage.
58 """
59 if not isinstance(expdir, pathlib.Path):
60 expdir = pathlib.Path(expdir)
61 m = import_module(expdir)
62 experiment_fn = getattr(m, "Experiment", None)
63 runner_fn = getattr(m, "Runner", None)
64
65 experiment_params = config.get("experiment_params", {})
66 experiment_from_config = experiment_params.pop("experiment", None)
67 assert any(
68 x is None for x in (experiment_fn, experiment_from_config)
69 ), "Experiment is set both in code and config."
70 if experiment_fn is None and experiment_from_config is not None:
71 experiment_fn = EXPERIMENTS.get(experiment_from_config)
72
73 runner_params = config.get("runner_params", {})
74 runner_from_config = runner_params.pop("runner", None)
75 assert any(
76 x is None for x in (runner_fn, runner_from_config)
77 ), "Runner is set both in code and config."
78 if runner_fn is None and runner_from_config is not None:
79 runner_fn = RUNNERS.get(runner_from_config)
80
81 experiment = experiment_fn(config)
82 runner = runner_fn(**runner_params)
83
84 return experiment, runner, config
85
86
87 def _tricky_dir_copy(dir_from: str, dir_to: str) -> None:
88 os.makedirs(dir_to, exist_ok=True)
89 shutil.rmtree(dir_to)
90 shutil.copytree(dir_from, dir_to)
91
92
93 def dump_code(
94 expdir: Union[str, pathlib.Path], logdir: Union[str, pathlib.Path]
95 ) -> None:
96 """
97 Dumps Catalyst code for reproducibility.
98
99 Args:
100 expdir (Union[str, pathlib.Path]): experiment dir path
101 logdir (Union[str, pathlib.Path]): logging dir path
102 """
103 expdir = expdir[:-1] if expdir.endswith("/") else expdir
104 new_src_dir = "code"
105
106 # @TODO: hardcoded
107 old_pro_dir = os.path.dirname(os.path.abspath(__file__)) + "/../"
108 new_pro_dir = os.path.join(logdir, new_src_dir, "catalyst")
109 _tricky_dir_copy(old_pro_dir, new_pro_dir)
110
111 old_expdir = os.path.abspath(expdir)
112 new_expdir = os.path.basename(old_expdir)
113 new_expdir = os.path.join(logdir, new_src_dir, new_expdir)
114 _tricky_dir_copy(old_expdir, new_expdir)
115
116
117 def dump_python_files(src: pathlib.Path, dst: pathlib.Path) -> None:
118 """
119 Dumps python code (``*.py`` and ``*.ipynb``) files.
120
121 Args:
122 src: source code path
123 dst: destination code path
124 """
125 py_files = list(src.glob("*.py"))
126 ipynb_files = list(src.glob("*.ipynb"))
127
128 py_files += ipynb_files
129 py_files = list(set(py_files))
130 for py_file in py_files:
131 shutil.copy2(f"{str(py_file.absolute())}", f"{dst}/{py_file.name}")
132
133
134 def dump_experiment_code(src: pathlib.Path, dst: pathlib.Path) -> None:
135 """
136 Dumps your experiment code for Config API use cases.
137
138 Args:
139 src: source code path
140 dst: destination code path
141 """
142 utcnow = get_utcnow_time()
143 dst = dst.joinpath("code")
144 dst = dst.joinpath(f"code-{utcnow}") if dst.exists() else dst
145 os.makedirs(dst, exist_ok=True)
146 dump_python_files(src, dst)
147
148
149 def distributed_cmd_run(
150 worker_fn: Callable, distributed: bool = True, *args, **kwargs
151 ) -> None:
152 """
153 Distributed run
154
155 Args:
156 worker_fn: worker fn to run in distributed mode
157 distributed: distributed flag
158 args: additional parameters for worker_fn
159 kwargs: additional key-value parameters for worker_fn
160 """
161 distributed_params = get_distributed_params()
162 local_rank = distributed_params["local_rank"]
163 world_size = distributed_params["world_size"]
164
165 if distributed and torch.distributed.is_initialized():
166 warnings.warn(
167 "Looks like you are trying to call distributed setup twice, "
168 "switching to normal run for correct distributed training."
169 )
170
171 if (
172 not distributed
173 or torch.distributed.is_initialized()
174 or world_size <= 1
175 ):
176 worker_fn(*args, **kwargs)
177 elif local_rank is not None:
178 torch.cuda.set_device(int(local_rank))
179
180 torch.distributed.init_process_group(
181 backend="nccl", init_method="env://"
182 )
183 worker_fn(*args, **kwargs)
184 else:
185 workers = []
186 try:
187 for local_rank in range(torch.cuda.device_count()):
188 rank = distributed_params["start_rank"] + local_rank
189 env = get_distributed_env(local_rank, rank, world_size)
190 cmd = [sys.executable] + sys.argv.copy()
191 workers.append(subprocess.Popen(cmd, env=env))
192 for worker in workers:
193 worker.wait()
194 finally:
195 for worker in workers:
196 worker.kill()
197
198
199 __all__ = [
200 "import_module",
201 "dump_code",
202 "dump_python_files",
203 "prepare_config_api_components",
204 "dump_experiment_code",
205 "distributed_cmd_run",
206 ]
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/catalyst/utils/scripts.py b/catalyst/utils/scripts.py
--- a/catalyst/utils/scripts.py
+++ b/catalyst/utils/scripts.py
@@ -1,4 +1,5 @@
from typing import Callable, Dict, Union
+import copy
from importlib.util import module_from_spec, spec_from_file_location
import os
import pathlib
@@ -56,13 +57,14 @@
Returns:
Experiment, Runner, Config for Config API usage.
"""
+ copy_config = copy.deepcopy(config)
if not isinstance(expdir, pathlib.Path):
expdir = pathlib.Path(expdir)
m = import_module(expdir)
experiment_fn = getattr(m, "Experiment", None)
runner_fn = getattr(m, "Runner", None)
- experiment_params = config.get("experiment_params", {})
+ experiment_params = copy_config.get("experiment_params", {})
experiment_from_config = experiment_params.pop("experiment", None)
assert any(
x is None for x in (experiment_fn, experiment_from_config)
@@ -70,7 +72,7 @@
if experiment_fn is None and experiment_from_config is not None:
experiment_fn = EXPERIMENTS.get(experiment_from_config)
- runner_params = config.get("runner_params", {})
+ runner_params = copy_config.get("runner_params", {})
runner_from_config = runner_params.pop("runner", None)
assert any(
x is None for x in (runner_fn, runner_from_config)
@@ -78,7 +80,7 @@
if runner_fn is None and runner_from_config is not None:
runner_fn = RUNNERS.get(runner_from_config)
- experiment = experiment_fn(config)
+ experiment = experiment_fn(copy_config)
runner = runner_fn(**runner_params)
return experiment, runner, config
| {"golden_diff": "diff --git a/catalyst/utils/scripts.py b/catalyst/utils/scripts.py\n--- a/catalyst/utils/scripts.py\n+++ b/catalyst/utils/scripts.py\n@@ -1,4 +1,5 @@\n from typing import Callable, Dict, Union\n+import copy\n from importlib.util import module_from_spec, spec_from_file_location\n import os\n import pathlib\n@@ -56,13 +57,14 @@\n Returns:\n Experiment, Runner, Config for Config API usage.\n \"\"\"\n+ copy_config = copy.deepcopy(config)\n if not isinstance(expdir, pathlib.Path):\n expdir = pathlib.Path(expdir)\n m = import_module(expdir)\n experiment_fn = getattr(m, \"Experiment\", None)\n runner_fn = getattr(m, \"Runner\", None)\n \n- experiment_params = config.get(\"experiment_params\", {})\n+ experiment_params = copy_config.get(\"experiment_params\", {})\n experiment_from_config = experiment_params.pop(\"experiment\", None)\n assert any(\n x is None for x in (experiment_fn, experiment_from_config)\n@@ -70,7 +72,7 @@\n if experiment_fn is None and experiment_from_config is not None:\n experiment_fn = EXPERIMENTS.get(experiment_from_config)\n \n- runner_params = config.get(\"runner_params\", {})\n+ runner_params = copy_config.get(\"runner_params\", {})\n runner_from_config = runner_params.pop(\"runner\", None)\n assert any(\n x is None for x in (runner_fn, runner_from_config)\n@@ -78,7 +80,7 @@\n if runner_fn is None and runner_from_config is not None:\n runner_fn = RUNNERS.get(runner_from_config)\n \n- experiment = experiment_fn(config)\n+ experiment = experiment_fn(copy_config)\n runner = runner_fn(**runner_params)\n \n return experiment, runner, config\n", "issue": "Lost parameters in logs_path/configs/_config.json\n## \ud83d\udc1b Bug Report\r\n<!-- A clear and concise description of what the bug is. -->\r\nAfter running a custom experiment - the parameters given in the config are not saved into /path/to/log/configs/_config.json which gives problems during model tracing using the function `trace_model_from_checkpoint`.\r\n\r\n### How To Reproduce\r\nSteps to reproduce the behavior:\r\n\r\n1. Comment the line `from .experiment import Experiment` in `examples/cifar_simple/__init__.py` (otherwise you will get the error \"AssertionError: Experiment is set both in code and config.\")\r\n2. `cd examples`\r\n3. `catalyst-dl run --config ./cifar_simple/config_experiment.yml`\r\n4. Then run `catalyst-dl trace ./logs/cifar_simple/` -> get error \"TypeError: 'NoneType' object is not callable\"\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n<!--#### Code sample--> \r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n\r\n#### Screenshots\r\n<!-- If applicable, add screenshots to help explain your problem. -->\r\n\r\n\r\n### Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\nTracing doesn't give an error; parameters are written to _config.\r\n\r\n### Environment\r\nPlease copy and paste the output from our environment collection script\r\n```bash\r\nCatalyst version: 20.11\r\nPyTorch version: 1.7.0\r\nIs debug build: Yes\r\nCUDA used to build PyTorch: 10.2\r\nTensorFlow version: N/A\r\nTensorBoard version: 2.4.0\r\n\r\nOS: Linux Mint 19.3 Tricia\r\nGCC version: (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0\r\nCMake version: version 3.10.2\r\n\r\nPython version: 3.8\r\nIs CUDA available: Yes\r\nCUDA runtime version: Could not collect\r\nGPU models and configuration: GPU 0: GeForce RTX 2070 SUPER\r\nNvidia driver version: 450.80.02\r\ncuDNN version: Could not collect\r\n\r\nVersions of relevant libraries:\r\n[pip3] catalyst==20.11\r\n[pip3] catalyst-codestyle==20.6.1\r\n[pip3] catalyst-sphinx-theme==1.1.2\r\n[pip3] efficientnet-pytorch==0.6.3\r\n[pip3] numpy==1.19.4\r\n[pip3] segmentation-models-pytorch==0.1.2\r\n[pip3] tensorboard==2.4.0\r\n[pip3] tensorboard-plugin-wit==1.7.0\r\n[pip3] tensorboardX==2.1\r\n[pip3] torch==1.7.0\r\n[pip3] torchvision==0.8.1\r\n[conda] catalyst 20.11 pypi_0 pypi\r\n[conda] catalyst-codestyle 20.6.1 pypi_0 pypi\r\n[conda] catalyst-sphinx-theme 1.1.2 pypi_0 pypi\r\n[conda] efficientnet-pytorch 0.6.3 pypi_0 pypi\r\n[conda] numpy 1.19.4 pypi_0 pypi\r\n[conda] segmentation-models-pytorch 0.1.2 pypi_0 pypi\r\n[conda] tensorboard 2.4.0 pypi_0 pypi\r\n[conda] tensorboard-plugin-wit 1.7.0 pypi_0 pypi\r\n[conda] tensorboardx 2.1 pypi_0 pypi\r\n[conda] torch 1.7.0 pypi_0 pypi\r\n[conda] torchvision 0.8.1 pypi_0 pypi\r\n```\r\n\r\nPS\r\n- [x] I know, that I could [join Catalyst slack (#__questions channel)](https://join.slack.com/t/catalyst-team-core/shared_invite/zt-d9miirnn-z86oKDzFMKlMG4fgFdZafw) for issue discussion.\r\n\n", "before_files": [{"content": "from typing import Callable, Dict, Union\nfrom importlib.util import module_from_spec, spec_from_file_location\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\nimport warnings\n\nimport torch\nimport torch.distributed\n\nfrom catalyst.registry import EXPERIMENTS, RUNNERS\nfrom catalyst.utils.distributed import (\n get_distributed_env,\n get_distributed_params,\n)\nfrom catalyst.utils.misc import get_utcnow_time\n\n\ndef import_module(expdir: Union[str, pathlib.Path]):\n \"\"\"\n Imports python module by path.\n\n Args:\n expdir: path to python module.\n\n Returns:\n Imported module.\n \"\"\"\n if not isinstance(expdir, pathlib.Path):\n expdir = pathlib.Path(expdir)\n sys.path.insert(0, str(expdir.absolute()))\n sys.path.insert(0, os.path.dirname(str(expdir.absolute())))\n s = spec_from_file_location(\n expdir.name,\n str(expdir.absolute() / \"__init__.py\"),\n submodule_search_locations=[expdir.absolute()],\n )\n m = module_from_spec(s)\n s.loader.exec_module(m)\n sys.modules[expdir.name] = m\n return m\n\n\ndef prepare_config_api_components(expdir: pathlib.Path, config: Dict):\n \"\"\"\n Imports and create core Config API components - Experiment, Runner\n and Config from ``expdir`` - experiment directory\n and ``config`` - experiment config.\n\n Args:\n expdir: experiment directory path\n config: dictionary with experiment Config\n\n Returns:\n Experiment, Runner, Config for Config API usage.\n \"\"\"\n if not isinstance(expdir, pathlib.Path):\n expdir = pathlib.Path(expdir)\n m = import_module(expdir)\n experiment_fn = getattr(m, \"Experiment\", None)\n runner_fn = getattr(m, \"Runner\", None)\n\n experiment_params = config.get(\"experiment_params\", {})\n 
experiment_from_config = experiment_params.pop(\"experiment\", None)\n assert any(\n x is None for x in (experiment_fn, experiment_from_config)\n ), \"Experiment is set both in code and config.\"\n if experiment_fn is None and experiment_from_config is not None:\n experiment_fn = EXPERIMENTS.get(experiment_from_config)\n\n runner_params = config.get(\"runner_params\", {})\n runner_from_config = runner_params.pop(\"runner\", None)\n assert any(\n x is None for x in (runner_fn, runner_from_config)\n ), \"Runner is set both in code and config.\"\n if runner_fn is None and runner_from_config is not None:\n runner_fn = RUNNERS.get(runner_from_config)\n\n experiment = experiment_fn(config)\n runner = runner_fn(**runner_params)\n\n return experiment, runner, config\n\n\ndef _tricky_dir_copy(dir_from: str, dir_to: str) -> None:\n os.makedirs(dir_to, exist_ok=True)\n shutil.rmtree(dir_to)\n shutil.copytree(dir_from, dir_to)\n\n\ndef dump_code(\n expdir: Union[str, pathlib.Path], logdir: Union[str, pathlib.Path]\n) -> None:\n \"\"\"\n Dumps Catalyst code for reproducibility.\n\n Args:\n expdir (Union[str, pathlib.Path]): experiment dir path\n logdir (Union[str, pathlib.Path]): logging dir path\n \"\"\"\n expdir = expdir[:-1] if expdir.endswith(\"/\") else expdir\n new_src_dir = \"code\"\n\n # @TODO: hardcoded\n old_pro_dir = os.path.dirname(os.path.abspath(__file__)) + \"/../\"\n new_pro_dir = os.path.join(logdir, new_src_dir, \"catalyst\")\n _tricky_dir_copy(old_pro_dir, new_pro_dir)\n\n old_expdir = os.path.abspath(expdir)\n new_expdir = os.path.basename(old_expdir)\n new_expdir = os.path.join(logdir, new_src_dir, new_expdir)\n _tricky_dir_copy(old_expdir, new_expdir)\n\n\ndef dump_python_files(src: pathlib.Path, dst: pathlib.Path) -> None:\n \"\"\"\n Dumps python code (``*.py`` and ``*.ipynb``) files.\n\n Args:\n src: source code path\n dst: destination code path\n \"\"\"\n py_files = list(src.glob(\"*.py\"))\n ipynb_files = list(src.glob(\"*.ipynb\"))\n\n py_files += ipynb_files\n py_files = list(set(py_files))\n for py_file in py_files:\n shutil.copy2(f\"{str(py_file.absolute())}\", f\"{dst}/{py_file.name}\")\n\n\ndef dump_experiment_code(src: pathlib.Path, dst: pathlib.Path) -> None:\n \"\"\"\n Dumps your experiment code for Config API use cases.\n\n Args:\n src: source code path\n dst: destination code path\n \"\"\"\n utcnow = get_utcnow_time()\n dst = dst.joinpath(\"code\")\n dst = dst.joinpath(f\"code-{utcnow}\") if dst.exists() else dst\n os.makedirs(dst, exist_ok=True)\n dump_python_files(src, dst)\n\n\ndef distributed_cmd_run(\n worker_fn: Callable, distributed: bool = True, *args, **kwargs\n) -> None:\n \"\"\"\n Distributed run\n\n Args:\n worker_fn: worker fn to run in distributed mode\n distributed: distributed flag\n args: additional parameters for worker_fn\n kwargs: additional key-value parameters for worker_fn\n \"\"\"\n distributed_params = get_distributed_params()\n local_rank = distributed_params[\"local_rank\"]\n world_size = distributed_params[\"world_size\"]\n\n if distributed and torch.distributed.is_initialized():\n warnings.warn(\n \"Looks like you are trying to call distributed setup twice, \"\n \"switching to normal run for correct distributed training.\"\n )\n\n if (\n not distributed\n or torch.distributed.is_initialized()\n or world_size <= 1\n ):\n worker_fn(*args, **kwargs)\n elif local_rank is not None:\n torch.cuda.set_device(int(local_rank))\n\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n worker_fn(*args, **kwargs)\n 
else:\n workers = []\n try:\n for local_rank in range(torch.cuda.device_count()):\n rank = distributed_params[\"start_rank\"] + local_rank\n env = get_distributed_env(local_rank, rank, world_size)\n cmd = [sys.executable] + sys.argv.copy()\n workers.append(subprocess.Popen(cmd, env=env))\n for worker in workers:\n worker.wait()\n finally:\n for worker in workers:\n worker.kill()\n\n\n__all__ = [\n \"import_module\",\n \"dump_code\",\n \"dump_python_files\",\n \"prepare_config_api_components\",\n \"dump_experiment_code\",\n \"distributed_cmd_run\",\n]\n", "path": "catalyst/utils/scripts.py"}], "after_files": [{"content": "from typing import Callable, Dict, Union\nimport copy\nfrom importlib.util import module_from_spec, spec_from_file_location\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport sys\nimport warnings\n\nimport torch\nimport torch.distributed\n\nfrom catalyst.registry import EXPERIMENTS, RUNNERS\nfrom catalyst.utils.distributed import (\n get_distributed_env,\n get_distributed_params,\n)\nfrom catalyst.utils.misc import get_utcnow_time\n\n\ndef import_module(expdir: Union[str, pathlib.Path]):\n \"\"\"\n Imports python module by path.\n\n Args:\n expdir: path to python module.\n\n Returns:\n Imported module.\n \"\"\"\n if not isinstance(expdir, pathlib.Path):\n expdir = pathlib.Path(expdir)\n sys.path.insert(0, str(expdir.absolute()))\n sys.path.insert(0, os.path.dirname(str(expdir.absolute())))\n s = spec_from_file_location(\n expdir.name,\n str(expdir.absolute() / \"__init__.py\"),\n submodule_search_locations=[expdir.absolute()],\n )\n m = module_from_spec(s)\n s.loader.exec_module(m)\n sys.modules[expdir.name] = m\n return m\n\n\ndef prepare_config_api_components(expdir: pathlib.Path, config: Dict):\n \"\"\"\n Imports and create core Config API components - Experiment, Runner\n and Config from ``expdir`` - experiment directory\n and ``config`` - experiment config.\n\n Args:\n expdir: experiment directory path\n config: dictionary with experiment Config\n\n Returns:\n Experiment, Runner, Config for Config API usage.\n \"\"\"\n copy_config = copy.deepcopy(config)\n if not isinstance(expdir, pathlib.Path):\n expdir = pathlib.Path(expdir)\n m = import_module(expdir)\n experiment_fn = getattr(m, \"Experiment\", None)\n runner_fn = getattr(m, \"Runner\", None)\n\n experiment_params = copy_config.get(\"experiment_params\", {})\n experiment_from_config = experiment_params.pop(\"experiment\", None)\n assert any(\n x is None for x in (experiment_fn, experiment_from_config)\n ), \"Experiment is set both in code and config.\"\n if experiment_fn is None and experiment_from_config is not None:\n experiment_fn = EXPERIMENTS.get(experiment_from_config)\n\n runner_params = copy_config.get(\"runner_params\", {})\n runner_from_config = runner_params.pop(\"runner\", None)\n assert any(\n x is None for x in (runner_fn, runner_from_config)\n ), \"Runner is set both in code and config.\"\n if runner_fn is None and runner_from_config is not None:\n runner_fn = RUNNERS.get(runner_from_config)\n\n experiment = experiment_fn(copy_config)\n runner = runner_fn(**runner_params)\n\n return experiment, runner, config\n\n\ndef _tricky_dir_copy(dir_from: str, dir_to: str) -> None:\n os.makedirs(dir_to, exist_ok=True)\n shutil.rmtree(dir_to)\n shutil.copytree(dir_from, dir_to)\n\n\ndef dump_code(\n expdir: Union[str, pathlib.Path], logdir: Union[str, pathlib.Path]\n) -> None:\n \"\"\"\n Dumps Catalyst code for reproducibility.\n\n Args:\n expdir (Union[str, pathlib.Path]): experiment dir 
path\n logdir (Union[str, pathlib.Path]): logging dir path\n \"\"\"\n expdir = expdir[:-1] if expdir.endswith(\"/\") else expdir\n new_src_dir = \"code\"\n\n # @TODO: hardcoded\n old_pro_dir = os.path.dirname(os.path.abspath(__file__)) + \"/../\"\n new_pro_dir = os.path.join(logdir, new_src_dir, \"catalyst\")\n _tricky_dir_copy(old_pro_dir, new_pro_dir)\n\n old_expdir = os.path.abspath(expdir)\n new_expdir = os.path.basename(old_expdir)\n new_expdir = os.path.join(logdir, new_src_dir, new_expdir)\n _tricky_dir_copy(old_expdir, new_expdir)\n\n\ndef dump_python_files(src: pathlib.Path, dst: pathlib.Path) -> None:\n \"\"\"\n Dumps python code (``*.py`` and ``*.ipynb``) files.\n\n Args:\n src: source code path\n dst: destination code path\n \"\"\"\n py_files = list(src.glob(\"*.py\"))\n ipynb_files = list(src.glob(\"*.ipynb\"))\n\n py_files += ipynb_files\n py_files = list(set(py_files))\n for py_file in py_files:\n shutil.copy2(f\"{str(py_file.absolute())}\", f\"{dst}/{py_file.name}\")\n\n\ndef dump_experiment_code(src: pathlib.Path, dst: pathlib.Path) -> None:\n \"\"\"\n Dumps your experiment code for Config API use cases.\n\n Args:\n src: source code path\n dst: destination code path\n \"\"\"\n utcnow = get_utcnow_time()\n dst = dst.joinpath(\"code\")\n dst = dst.joinpath(f\"code-{utcnow}\") if dst.exists() else dst\n os.makedirs(dst, exist_ok=True)\n dump_python_files(src, dst)\n\n\ndef distributed_cmd_run(\n worker_fn: Callable, distributed: bool = True, *args, **kwargs\n) -> None:\n \"\"\"\n Distributed run\n\n Args:\n worker_fn: worker fn to run in distributed mode\n distributed: distributed flag\n args: additional parameters for worker_fn\n kwargs: additional key-value parameters for worker_fn\n \"\"\"\n distributed_params = get_distributed_params()\n local_rank = distributed_params[\"local_rank\"]\n world_size = distributed_params[\"world_size\"]\n\n if distributed and torch.distributed.is_initialized():\n warnings.warn(\n \"Looks like you are trying to call distributed setup twice, \"\n \"switching to normal run for correct distributed training.\"\n )\n\n if (\n not distributed\n or torch.distributed.is_initialized()\n or world_size <= 1\n ):\n worker_fn(*args, **kwargs)\n elif local_rank is not None:\n torch.cuda.set_device(int(local_rank))\n\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n worker_fn(*args, **kwargs)\n else:\n workers = []\n try:\n for local_rank in range(torch.cuda.device_count()):\n rank = distributed_params[\"start_rank\"] + local_rank\n env = get_distributed_env(local_rank, rank, world_size)\n cmd = [sys.executable] + sys.argv.copy()\n workers.append(subprocess.Popen(cmd, env=env))\n for worker in workers:\n worker.wait()\n finally:\n for worker in workers:\n worker.kill()\n\n\n__all__ = [\n \"import_module\",\n \"dump_code\",\n \"dump_python_files\",\n \"prepare_config_api_components\",\n \"dump_experiment_code\",\n \"distributed_cmd_run\",\n]\n", "path": "catalyst/utils/scripts.py"}]} | 3,202 | 399 |
gh_patches_debug_8227 | rasdani/github-patches | git_diff | spacetelescope__jwql-84 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make dev conda environment more general
We should make our `dev` `conda` environment more generalized so that it can be used on the new test server.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.4.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
8 AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
11
12 setup(
13 name = 'jwql',
14 version = VERSION,
15 description = 'The JWST Quicklook Project',
16 url = 'https://github.com/spacetelescope/jwql.git',
17 author = AUTHORS,
18 author_email='[email protected]',
19 license='BSD',
20 keywords = ['astronomy', 'python'],
21 classifiers = ['Programming Language :: Python'],
22 packages = find_packages(),
23 install_requires = REQUIRES,
24 include_package_data=True,
25 include_dirs = [np.get_include()],
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'
AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
-REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']
setup(
name = 'jwql',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\n AUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n \n-REQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n+REQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n \n setup(\n name = 'jwql',\n", "issue": "Make dev conda environment more general\nWe should make our `dev` `conda` environment more generalized so that it can be used on the new test server. \n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name = 'jwql',\n version = VERSION,\n description = 'The JWST Quicklook Project',\n url = 'https://github.com/spacetelescope/jwql.git',\n author = AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords = ['astronomy', 'python'],\n classifiers = ['Programming Language :: Python'],\n packages = find_packages(),\n install_requires = REQUIRES,\n include_package_data=True,\n include_dirs = [np.get_include()],\n )\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += 'Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n name = 'jwql',\n version = VERSION,\n description = 'The JWST Quicklook Project',\n url = 'https://github.com/spacetelescope/jwql.git',\n author = AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords = ['astronomy', 'python'],\n classifiers = ['Programming Language :: Python'],\n packages = find_packages(),\n install_requires = REQUIRES,\n include_package_data=True,\n include_dirs = [np.get_include()],\n )\n", "path": "setup.py"}]} | 571 | 189 |
gh_patches_debug_38004 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-594 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replay does not get correct user config
I think we need to persist the currently chosen config file so that subsequent calls of `config.get_user_config` yield the same values for the session.
Currently **cookiecutter/replay.py** always points to `os.path.expanduser('~/.cookiecutter_replay')`:
``` python
replay_dir = get_user_config()['replay_dir']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/replay.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 cookiecutter.replay
5 -------------------
6 """
7
8 from __future__ import unicode_literals
9
10 import json
11 import os
12 from past.builtins import basestring
13
14 from .config import get_user_config
15 from .utils import make_sure_path_exists
16
17
18 def get_file_name(replay_dir, template_name):
19 file_name = '{}.json'.format(template_name)
20 return os.path.join(replay_dir, file_name)
21
22
23 def dump(template_name, context):
24 if not isinstance(template_name, basestring):
25 raise TypeError('Template name is required to be of type str')
26
27 if not isinstance(context, dict):
28 raise TypeError('Context is required to be of type dict')
29
30 if 'cookiecutter' not in context:
31 raise ValueError('Context is required to contain a cookiecutter key')
32
33 replay_dir = get_user_config()['replay_dir']
34
35 if not make_sure_path_exists(replay_dir):
36 raise IOError('Unable to create replay dir at {}'.format(replay_dir))
37
38 replay_file = get_file_name(replay_dir, template_name)
39
40 with open(replay_file, 'w') as outfile:
41 json.dump(context, outfile)
42
43
44 def load(template_name):
45 if not isinstance(template_name, basestring):
46 raise TypeError('Template name is required to be of type str')
47
48 replay_dir = get_user_config()['replay_dir']
49 replay_file = get_file_name(replay_dir, template_name)
50
51 with open(replay_file, 'r') as infile:
52 context = json.load(infile)
53
54 if 'cookiecutter' not in context:
55 raise ValueError('Context is required to contain a cookiecutter key')
56
57 return context
58
```
Path: `cookiecutter/main.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import logging
16 import os
17 import re
18
19 from .config import get_user_config, USER_CONFIG_PATH
20 from .exceptions import InvalidModeException
21 from .prompt import prompt_for_config
22 from .generate import generate_context, generate_files
23 from .vcs import clone
24 from .replay import dump, load
25
26 logger = logging.getLogger(__name__)
27
28 builtin_abbreviations = {
29 'gh': 'https://github.com/{0}.git',
30 'bb': 'https://bitbucket.org/{0}',
31 }
32
33 REPO_REGEX = """
34 (
35 ((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.
36 | # or
37 (\w+@[\w\.]+) # something like user@...
38 )
39 .*
40 """
41
42
43 def is_repo_url(value):
44 """Return True if value is a repository URL."""
45 return bool(re.match(REPO_REGEX, value, re.VERBOSE))
46
47
48 def expand_abbreviations(template, config_dict):
49 """
50 Expand abbreviations in a template name.
51
52 :param template: The project template name.
53 :param config_dict: The user config, which will contain abbreviation
54 definitions.
55 """
56
57 abbreviations = builtin_abbreviations.copy()
58 abbreviations.update(config_dict.get('abbreviations', {}))
59
60 if template in abbreviations:
61 return abbreviations[template]
62
63 # Split on colon. If there is no colon, rest will be empty
64 # and prefix will be the whole template
65 prefix, sep, rest = template.partition(':')
66 if prefix in abbreviations:
67 return abbreviations[prefix].format(rest)
68
69 return template
70
71
72 def cookiecutter(
73 template, checkout=None, no_input=False, extra_context=None,
74 replay=False, overwrite_if_exists=False, output_dir='.',
75 config_file=USER_CONFIG_PATH):
76 """
77 API equivalent to using Cookiecutter at the command line.
78
79 :param template: A directory containing a project template directory,
80 or a URL to a git repository.
81 :param checkout: The branch, tag or commit ID to checkout after clone.
82 :param no_input: Prompt the user at command line for manual configuration?
83 :param extra_context: A dictionary of context that overrides default
84 and user configuration.
85 :param: overwrite_if_exists: Overwrite the contents of output directory
86 if it exists
87 :param output_dir: Where to output the generated project dir into.
88 :param config_file: User configuration file path.
89 """
90 if replay and ((no_input is not False) or (extra_context is not None)):
91 err_msg = (
92 "You can not use both replay and no_input or extra_context "
93 "at the same time."
94 )
95 raise InvalidModeException(err_msg)
96
97 # Get user config from ~/.cookiecutterrc or equivalent
98 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
99 config_dict = get_user_config(config_file=config_file)
100
101 template = expand_abbreviations(template, config_dict)
102
103 if is_repo_url(template):
104 repo_dir = clone(
105 repo_url=template,
106 checkout=checkout,
107 clone_to_dir=config_dict['cookiecutters_dir'],
108 no_input=no_input
109 )
110 else:
111 # If it's a local repo, no need to clone or copy to your
112 # cookiecutters_dir
113 repo_dir = template
114
115 template_name = os.path.basename(template)
116
117 if replay:
118 context = load(template_name)
119 else:
120 context_file = os.path.join(repo_dir, 'cookiecutter.json')
121 logging.debug('context_file is {0}'.format(context_file))
122
123 context = generate_context(
124 context_file=context_file,
125 default_context=config_dict['default_context'],
126 extra_context=extra_context,
127 )
128
129 # prompt the user to manually configure at the command line.
130 # except when 'no-input' flag is set
131 context['cookiecutter'] = prompt_for_config(context, no_input)
132
133 dump(template_name, context)
134
135 # Create project from local context and project template.
136 return generate_files(
137 repo_dir=repo_dir,
138 context=context,
139 overwrite_if_exists=overwrite_if_exists,
140 output_dir=output_dir
141 )
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -115,7 +115,7 @@
template_name = os.path.basename(template)
if replay:
- context = load(template_name)
+ context = load(config_dict['replay_dir'], template_name)
else:
context_file = os.path.join(repo_dir, 'cookiecutter.json')
logging.debug('context_file is {0}'.format(context_file))
@@ -130,7 +130,7 @@
# except when 'no-input' flag is set
context['cookiecutter'] = prompt_for_config(context, no_input)
- dump(template_name, context)
+ dump(config_dict['replay_dir'], template_name, context)
# Create project from local context and project template.
return generate_files(
diff --git a/cookiecutter/replay.py b/cookiecutter/replay.py
--- a/cookiecutter/replay.py
+++ b/cookiecutter/replay.py
@@ -11,7 +11,6 @@
import os
from past.builtins import basestring
-from .config import get_user_config
from .utils import make_sure_path_exists
@@ -20,7 +19,10 @@
return os.path.join(replay_dir, file_name)
-def dump(template_name, context):
+def dump(replay_dir, template_name, context):
+ if not make_sure_path_exists(replay_dir):
+ raise IOError('Unable to create replay dir at {}'.format(replay_dir))
+
if not isinstance(template_name, basestring):
raise TypeError('Template name is required to be of type str')
@@ -30,22 +32,16 @@
if 'cookiecutter' not in context:
raise ValueError('Context is required to contain a cookiecutter key')
- replay_dir = get_user_config()['replay_dir']
-
- if not make_sure_path_exists(replay_dir):
- raise IOError('Unable to create replay dir at {}'.format(replay_dir))
-
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'w') as outfile:
json.dump(context, outfile)
-def load(template_name):
+def load(replay_dir, template_name):
if not isinstance(template_name, basestring):
raise TypeError('Template name is required to be of type str')
- replay_dir = get_user_config()['replay_dir']
replay_file = get_file_name(replay_dir, template_name)
with open(replay_file, 'r') as infile:
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -115,7 +115,7 @@\n template_name = os.path.basename(template)\n \n if replay:\n- context = load(template_name)\n+ context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n@@ -130,7 +130,7 @@\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n \n- dump(template_name, context)\n+ dump(config_dict['replay_dir'], template_name, context)\n \n # Create project from local context and project template.\n return generate_files(\ndiff --git a/cookiecutter/replay.py b/cookiecutter/replay.py\n--- a/cookiecutter/replay.py\n+++ b/cookiecutter/replay.py\n@@ -11,7 +11,6 @@\n import os\n from past.builtins import basestring\n \n-from .config import get_user_config\n from .utils import make_sure_path_exists\n \n \n@@ -20,7 +19,10 @@\n return os.path.join(replay_dir, file_name)\n \n \n-def dump(template_name, context):\n+def dump(replay_dir, template_name, context):\n+ if not make_sure_path_exists(replay_dir):\n+ raise IOError('Unable to create replay dir at {}'.format(replay_dir))\n+\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n \n@@ -30,22 +32,16 @@\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n \n- replay_dir = get_user_config()['replay_dir']\n-\n- if not make_sure_path_exists(replay_dir):\n- raise IOError('Unable to create replay dir at {}'.format(replay_dir))\n-\n replay_file = get_file_name(replay_dir, template_name)\n \n with open(replay_file, 'w') as outfile:\n json.dump(context, outfile)\n \n \n-def load(template_name):\n+def load(replay_dir, template_name):\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n \n- replay_dir = get_user_config()['replay_dir']\n replay_file = get_file_name(replay_dir, template_name)\n \n with open(replay_file, 'r') as infile:\n", "issue": "Replay does not get correct user config\nI think we need to persist the currently chosen config file so that subsequent calls of `config.get_user_config` yield the same values for the session.\n\nCurrently **cookiecutter/replay.py** always points to `os.path.expanduser('~/.cookiecutter_replay')`:\n\n``` python\nreplay_dir = get_user_config()['replay_dir']\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.replay\n-------------------\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport os\nfrom past.builtins import basestring\n\nfrom .config import get_user_config\nfrom .utils import make_sure_path_exists\n\n\ndef get_file_name(replay_dir, template_name):\n file_name = '{}.json'.format(template_name)\n return os.path.join(replay_dir, file_name)\n\n\ndef dump(template_name, context):\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n\n if not isinstance(context, dict):\n raise TypeError('Context is required to be of type dict')\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n replay_dir = get_user_config()['replay_dir']\n\n if not make_sure_path_exists(replay_dir):\n raise IOError('Unable to create replay dir at {}'.format(replay_dir))\n\n 
replay_file = get_file_name(replay_dir, template_name)\n\n with open(replay_file, 'w') as outfile:\n json.dump(context, outfile)\n\n\ndef load(template_name):\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n\n replay_dir = get_user_config()['replay_dir']\n replay_file = get_file_name(replay_dir, template_name)\n\n with open(replay_file, 'r') as infile:\n context = json.load(infile)\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n return context\n", "path": "cookiecutter/replay.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.replay\n-------------------\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport os\nfrom past.builtins import basestring\n\nfrom .utils import make_sure_path_exists\n\n\ndef get_file_name(replay_dir, template_name):\n file_name = '{}.json'.format(template_name)\n return os.path.join(replay_dir, file_name)\n\n\ndef dump(replay_dir, template_name, context):\n if not make_sure_path_exists(replay_dir):\n raise IOError('Unable to create replay dir at {}'.format(replay_dir))\n\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n\n if not isinstance(context, dict):\n raise TypeError('Context is required to be of type dict')\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n replay_file = get_file_name(replay_dir, template_name)\n\n 
with open(replay_file, 'w') as outfile:\n json.dump(context, outfile)\n\n\ndef load(replay_dir, template_name):\n if not isinstance(template_name, basestring):\n raise TypeError('Template name is required to be of type str')\n\n replay_file = get_file_name(replay_dir, template_name)\n\n with open(replay_file, 'r') as infile:\n context = json.load(infile)\n\n if 'cookiecutter' not in context:\n raise ValueError('Context is required to contain a cookiecutter key')\n\n return context\n", "path": "cookiecutter/replay.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}]} | 2,133 | 590 |
gh_patches_debug_28901 | rasdani/github-patches | git_diff | Pylons__pyramid-710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
view_execution_permitted() when view does not exist?
`Pyramid.security.view_execution_permitted(context, request, name)` currently returns a true value if there is no appropriate view registered for the context. I'm not sure whether that is the intended behavior or not (so I'm not sure whether this is a bug report or a feature request.) I would argue that _"view exists and is permitted"_ is a more useful predicate than _"view execution permitted or view does not exist"_.
In my use case — which I'd guess is not uncommon — I'd like to determine whether the current user can execute a given view on a given context without knowing a priori whether that view is even registered for that context. (The result of the check determines whether an entry for that view will appear in an admin nav menu.)
Currently, AFAICT, the only way to determine whether a particular view exists using Pyramid's public API is to use `pyramid.traversal.traverse`, which seems awkward and overkill. Have I missed a better way?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/security.py`
Content:
```
1 from zope.interface import providedBy
2
3 from pyramid.interfaces import (
4 IAuthenticationPolicy,
5 IAuthorizationPolicy,
6 ISecuredView,
7 IViewClassifier,
8 )
9
10 from pyramid.compat import map_
11 from pyramid.threadlocal import get_current_registry
12
13 Everyone = 'system.Everyone'
14 Authenticated = 'system.Authenticated'
15 Allow = 'Allow'
16 Deny = 'Deny'
17
18 class AllPermissionsList(object):
19 """ Stand in 'permission list' to represent all permissions """
20 def __iter__(self):
21 return ()
22 def __contains__(self, other):
23 return True
24 def __eq__(self, other):
25 return isinstance(other, self.__class__)
26
27 ALL_PERMISSIONS = AllPermissionsList()
28 DENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)
29
30 NO_PERMISSION_REQUIRED = '__no_permission_required__'
31
32 def has_permission(permission, context, request):
33 """ Provided a permission (a string or unicode object), a context
34 (a :term:`resource` instance) and a request object, return an
35 instance of :data:`pyramid.security.Allowed` if the permission
36 is granted in this context to the user implied by the
37 request. Return an instance of :mod:`pyramid.security.Denied`
38 if this permission is not granted in this context to this user.
39 This function delegates to the current authentication and
40 authorization policies. Return
41 :data:`pyramid.security.Allowed` unconditionally if no
42 authentication policy has been configured in this application."""
43 try:
44 reg = request.registry
45 except AttributeError:
46 reg = get_current_registry() # b/c
47 authn_policy = reg.queryUtility(IAuthenticationPolicy)
48 if authn_policy is None:
49 return Allowed('No authentication policy in use.')
50
51 authz_policy = reg.queryUtility(IAuthorizationPolicy)
52 if authz_policy is None:
53 raise ValueError('Authentication policy registered without '
54 'authorization policy') # should never happen
55 principals = authn_policy.effective_principals(request)
56 return authz_policy.permits(context, principals, permission)
57
58 def authenticated_userid(request):
59 """ Return the userid of the currently authenticated user or
60 ``None`` if there is no :term:`authentication policy` in effect or
61 there is no currently authenticated user."""
62 try:
63 reg = request.registry
64 except AttributeError:
65 reg = get_current_registry() # b/c
66
67 policy = reg.queryUtility(IAuthenticationPolicy)
68 if policy is None:
69 return None
70 return policy.authenticated_userid(request)
71
72 def unauthenticated_userid(request):
73 """ Return an object which represents the *claimed* (not verified) user
74 id of the credentials present in the request. ``None`` if there is no
75 :term:`authentication policy` in effect or there is no user data
76 associated with the current request. This differs from
77 :func:`~pyramid.security.authenticated_userid`, because the effective
78 authentication policy will not ensure that a record associated with the
79 userid exists in persistent storage."""
80 try:
81 reg = request.registry
82 except AttributeError:
83 reg = get_current_registry() # b/c
84
85 policy = reg.queryUtility(IAuthenticationPolicy)
86 if policy is None:
87 return None
88 return policy.unauthenticated_userid(request)
89
90 def effective_principals(request):
91 """ Return the list of 'effective' :term:`principal` identifiers
92 for the ``request``. This will include the userid of the
93 currently authenticated user if a user is currently
94 authenticated. If no :term:`authentication policy` is in effect,
95 this will return an empty sequence."""
96 try:
97 reg = request.registry
98 except AttributeError:
99 reg = get_current_registry() # b/c
100
101 policy = reg.queryUtility(IAuthenticationPolicy)
102 if policy is None:
103 return [Everyone]
104 return policy.effective_principals(request)
105
106 def principals_allowed_by_permission(context, permission):
107 """ Provided a ``context`` (a resource object), and a ``permission``
108 (a string or unicode object), if a :term:`authorization policy` is
109 in effect, return a sequence of :term:`principal` ids that possess
110 the permission in the ``context``. If no authorization policy is
111 in effect, this will return a sequence with the single value
112 :mod:`pyramid.security.Everyone` (the special principal
113 identifier representing all principals).
114
115 .. note::
116
117 even if an :term:`authorization policy` is in effect,
118 some (exotic) authorization policies may not implement the
119 required machinery for this function; those will cause a
120 :exc:`NotImplementedError` exception to be raised when this
121 function is invoked.
122 """
123 reg = get_current_registry()
124 policy = reg.queryUtility(IAuthorizationPolicy)
125 if policy is None:
126 return [Everyone]
127 return policy.principals_allowed_by_permission(context, permission)
128
129 def view_execution_permitted(context, request, name=''):
130 """ If the view specified by ``context`` and ``name`` is protected
131 by a :term:`permission`, check the permission associated with the
132 view using the effective authentication/authorization policies and
133 the ``request``. Return a boolean result. If no
134 :term:`authorization policy` is in effect, or if the view is not
135 protected by a permission, return ``True``."""
136 try:
137 reg = request.registry
138 except AttributeError:
139 reg = get_current_registry() # b/c
140 provides = [IViewClassifier] + map_(providedBy, (request, context))
141 view = reg.adapters.lookup(provides, ISecuredView, name=name)
142 if view is None:
143 return Allowed(
144 'Allowed: view name %r in context %r (no permission defined)' %
145 (name, context))
146 return view.__permitted__(context, request)
147
148 def remember(request, principal, **kw):
149 """ Return a sequence of header tuples (e.g. ``[('Set-Cookie',
150 'foo=abc')]``) suitable for 'remembering' a set of credentials
151 implied by the data passed as ``principal`` and ``*kw`` using the
152 current :term:`authentication policy`. Common usage might look
153 like so within the body of a view function (``response`` is
154 assumed to be a :term:`WebOb` -style :term:`response` object
155 computed previously by the view code)::
156
157 from pyramid.security import remember
158 headers = remember(request, 'chrism', password='123', max_age='86400')
159 response.headerlist.extend(headers)
160 return response
161
162 If no :term:`authentication policy` is in use, this function will
163 always return an empty sequence. If used, the composition and
164 meaning of ``**kw`` must be agreed upon by the calling code and
165 the effective authentication policy."""
166 try:
167 reg = request.registry
168 except AttributeError:
169 reg = get_current_registry() # b/c
170 policy = reg.queryUtility(IAuthenticationPolicy)
171 if policy is None:
172 return []
173 else:
174 return policy.remember(request, principal, **kw)
175
176 def forget(request):
177 """ Return a sequence of header tuples (e.g. ``[('Set-Cookie',
178 'foo=abc')]``) suitable for 'forgetting' the set of credentials
179 possessed by the currently authenticated user. A common usage
180 might look like so within the body of a view function
181 (``response`` is assumed to be an :term:`WebOb` -style
182 :term:`response` object computed previously by the view code)::
183
184 from pyramid.security import forget
185 headers = forget(request)
186 response.headerlist.extend(headers)
187 return response
188
189 If no :term:`authentication policy` is in use, this function will
190 always return an empty sequence."""
191 try:
192 reg = request.registry
193 except AttributeError:
194 reg = get_current_registry() # b/c
195 policy = reg.queryUtility(IAuthenticationPolicy)
196 if policy is None:
197 return []
198 else:
199 return policy.forget(request)
200
201 class PermitsResult(int):
202 def __new__(cls, s, *args):
203 inst = int.__new__(cls, cls.boolval)
204 inst.s = s
205 inst.args = args
206 return inst
207
208 @property
209 def msg(self):
210 return self.s % self.args
211
212 def __str__(self):
213 return self.msg
214
215 def __repr__(self):
216 return '<%s instance at %s with msg %r>' % (self.__class__.__name__,
217 id(self),
218 self.msg)
219
220 class Denied(PermitsResult):
221 """ An instance of ``Denied`` is returned when a security-related
222 API or other :app:`Pyramid` code denies an action unrelated to
223 an ACL check. It evaluates equal to all boolean false types. It
224 has an attribute named ``msg`` describing the circumstances for
225 the deny."""
226 boolval = 0
227
228 class Allowed(PermitsResult):
229 """ An instance of ``Allowed`` is returned when a security-related
230 API or other :app:`Pyramid` code allows an action unrelated to
231 an ACL check. It evaluates equal to all boolean true types. It
232 has an attribute named ``msg`` describing the circumstances for
233 the allow."""
234 boolval = 1
235
236 class ACLPermitsResult(int):
237 def __new__(cls, ace, acl, permission, principals, context):
238 inst = int.__new__(cls, cls.boolval)
239 inst.permission = permission
240 inst.ace = ace
241 inst.acl = acl
242 inst.principals = principals
243 inst.context = context
244 return inst
245
246 @property
247 def msg(self):
248 s = ('%s permission %r via ACE %r in ACL %r on context %r for '
249 'principals %r')
250 return s % (self.__class__.__name__,
251 self.permission,
252 self.ace,
253 self.acl,
254 self.context,
255 self.principals)
256
257 def __str__(self):
258 return self.msg
259
260 def __repr__(self):
261 return '<%s instance at %s with msg %r>' % (self.__class__.__name__,
262 id(self),
263 self.msg)
264
265 class ACLDenied(ACLPermitsResult):
266 """ An instance of ``ACLDenied`` represents that a security check made
267 explicitly against ACL was denied. It evaluates equal to all boolean
268 false types. It also has the following attributes: ``acl``, ``ace``,
269 ``permission``, ``principals``, and ``context``. These attributes
270 indicate the security values involved in the request. Its __str__ method
271 prints a summary of these attributes for debugging purposes. The same
272 summary is available as the ``msg`` attribute."""
273 boolval = 0
274
275 class ACLAllowed(ACLPermitsResult):
276 """ An instance of ``ACLAllowed`` represents that a security check made
277 explicitly against ACL was allowed. It evaluates equal to all boolean
278 true types. It also has the following attributes: ``acl``, ``ace``,
279 ``permission``, ``principals``, and ``context``. These attributes
280 indicate the security values involved in the request. Its __str__ method
281 prints a summary of these attributes for debugging purposes. The same
282 summary is available as the ``msg`` attribute."""
283 boolval = 1
284
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyramid/security.py b/pyramid/security.py
--- a/pyramid/security.py
+++ b/pyramid/security.py
@@ -4,6 +4,7 @@
IAuthenticationPolicy,
IAuthorizationPolicy,
ISecuredView,
+ IView,
IViewClassifier,
)
@@ -132,7 +133,13 @@
view using the effective authentication/authorization policies and
the ``request``. Return a boolean result. If no
:term:`authorization policy` is in effect, or if the view is not
- protected by a permission, return ``True``."""
+ protected by a permission, return ``True``. If no view can view found,
+ an exception will be raised.
+
+ .. versionchanged:: 1.4a4
+ An exception is raised if no view is found.
+
+ """
try:
reg = request.registry
except AttributeError:
@@ -140,6 +147,11 @@
provides = [IViewClassifier] + map_(providedBy, (request, context))
view = reg.adapters.lookup(provides, ISecuredView, name=name)
if view is None:
+ view = reg.adapters.lookup(provides, IView, name=name)
+ if view is None:
+ raise TypeError('No registered view satisfies the constraints. '
+ 'It would not make sense to claim that this view '
+ '"is" or "is not" permitted.')
return Allowed(
'Allowed: view name %r in context %r (no permission defined)' %
(name, context))
| {"golden_diff": "diff --git a/pyramid/security.py b/pyramid/security.py\n--- a/pyramid/security.py\n+++ b/pyramid/security.py\n@@ -4,6 +4,7 @@\n IAuthenticationPolicy,\n IAuthorizationPolicy,\n ISecuredView,\n+ IView,\n IViewClassifier,\n )\n \n@@ -132,7 +133,13 @@\n view using the effective authentication/authorization policies and\n the ``request``. Return a boolean result. If no\n :term:`authorization policy` is in effect, or if the view is not\n- protected by a permission, return ``True``.\"\"\"\n+ protected by a permission, return ``True``. If no view can view found,\n+ an exception will be raised.\n+\n+ .. versionchanged:: 1.4a4\n+ An exception is raised if no view is found.\n+\n+ \"\"\"\n try:\n reg = request.registry\n except AttributeError:\n@@ -140,6 +147,11 @@\n provides = [IViewClassifier] + map_(providedBy, (request, context))\n view = reg.adapters.lookup(provides, ISecuredView, name=name)\n if view is None:\n+ view = reg.adapters.lookup(provides, IView, name=name)\n+ if view is None:\n+ raise TypeError('No registered view satisfies the constraints. '\n+ 'It would not make sense to claim that this view '\n+ '\"is\" or \"is not\" permitted.')\n return Allowed(\n 'Allowed: view name %r in context %r (no permission defined)' %\n (name, context))\n", "issue": "view_execution_permitted() when view does not exist?\n`Pyramid.security.view_execution_permitted(context, request, name)` currently returns a true value if there is no appropriate view registered for the context. I'm not sure whether that is the intended behavior or not (so I'm not sure whether this is a bug report or a feature request.) I would argue that _\"view exists and is permitted\"_ is a more useful predicate than _\"view execution permitted or view does not exist\"_.\n\nIn my use case \u2014 which I'd guess is not uncommon \u2014 I'd like to determine whether the current user can execute a given view on a given context without knowing a priori whether that view is even registered for that context. (The result of the check determines whether an entry for that view will appear in an admin nav menu.)\n\nCurrently, AFAICT, the only way to determine whether a particular view exists using Pyramid's public API is using `pyramid.traversal.traverse`, which seems awkward and overkill. Have I missed a better way?\n\n", "before_files": [{"content": "from zope.interface import providedBy\n\nfrom pyramid.interfaces import (\n IAuthenticationPolicy,\n IAuthorizationPolicy,\n ISecuredView,\n IViewClassifier,\n )\n\nfrom pyramid.compat import map_\nfrom pyramid.threadlocal import get_current_registry\n\nEveryone = 'system.Everyone'\nAuthenticated = 'system.Authenticated'\nAllow = 'Allow'\nDeny = 'Deny'\n\nclass AllPermissionsList(object):\n \"\"\" Stand in 'permission list' to represent all permissions \"\"\"\n def __iter__(self):\n return ()\n def __contains__(self, other):\n return True\n def __eq__(self, other):\n return isinstance(other, self.__class__)\n\nALL_PERMISSIONS = AllPermissionsList()\nDENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)\n\nNO_PERMISSION_REQUIRED = '__no_permission_required__'\n\ndef has_permission(permission, context, request):\n \"\"\" Provided a permission (a string or unicode object), a context\n (a :term:`resource` instance) and a request object, return an\n instance of :data:`pyramid.security.Allowed` if the permission\n is granted in this context to the user implied by the\n request. 
Return an instance of :mod:`pyramid.security.Denied`\n if this permission is not granted in this context to this user.\n This function delegates to the current authentication and\n authorization policies. Return\n :data:`pyramid.security.Allowed` unconditionally if no\n authentication policy has been configured in this application.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n authn_policy = reg.queryUtility(IAuthenticationPolicy)\n if authn_policy is None:\n return Allowed('No authentication policy in use.')\n\n authz_policy = reg.queryUtility(IAuthorizationPolicy)\n if authz_policy is None:\n raise ValueError('Authentication policy registered without '\n 'authorization policy') # should never happen\n principals = authn_policy.effective_principals(request)\n return authz_policy.permits(context, principals, permission)\n\ndef authenticated_userid(request):\n \"\"\" Return the userid of the currently authenticated user or\n ``None`` if there is no :term:`authentication policy` in effect or\n there is no currently authenticated user.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return None\n return policy.authenticated_userid(request)\n\ndef unauthenticated_userid(request):\n \"\"\" Return an object which represents the *claimed* (not verified) user\n id of the credentials present in the request. ``None`` if there is no\n :term:`authentication policy` in effect or there is no user data\n associated with the current request. This differs from\n :func:`~pyramid.security.authenticated_userid`, because the effective\n authentication policy will not ensure that a record associated with the\n userid exists in persistent storage.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return None\n return policy.unauthenticated_userid(request)\n\ndef effective_principals(request):\n \"\"\" Return the list of 'effective' :term:`principal` identifiers\n for the ``request``. This will include the userid of the\n currently authenticated user if a user is currently\n authenticated. If no :term:`authentication policy` is in effect,\n this will return an empty sequence.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return [Everyone]\n return policy.effective_principals(request)\n\ndef principals_allowed_by_permission(context, permission):\n \"\"\" Provided a ``context`` (a resource object), and a ``permission``\n (a string or unicode object), if a :term:`authorization policy` is\n in effect, return a sequence of :term:`principal` ids that possess\n the permission in the ``context``. If no authorization policy is\n in effect, this will return a sequence with the single value\n :mod:`pyramid.security.Everyone` (the special principal\n identifier representing all principals).\n\n .. 
note::\n\n even if an :term:`authorization policy` is in effect,\n some (exotic) authorization policies may not implement the\n required machinery for this function; those will cause a\n :exc:`NotImplementedError` exception to be raised when this\n function is invoked.\n \"\"\"\n reg = get_current_registry()\n policy = reg.queryUtility(IAuthorizationPolicy)\n if policy is None:\n return [Everyone]\n return policy.principals_allowed_by_permission(context, permission)\n\ndef view_execution_permitted(context, request, name=''):\n \"\"\" If the view specified by ``context`` and ``name`` is protected\n by a :term:`permission`, check the permission associated with the\n view using the effective authentication/authorization policies and\n the ``request``. Return a boolean result. If no\n :term:`authorization policy` is in effect, or if the view is not\n protected by a permission, return ``True``.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n provides = [IViewClassifier] + map_(providedBy, (request, context))\n view = reg.adapters.lookup(provides, ISecuredView, name=name)\n if view is None:\n return Allowed(\n 'Allowed: view name %r in context %r (no permission defined)' %\n (name, context))\n return view.__permitted__(context, request)\n\ndef remember(request, principal, **kw):\n \"\"\" Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'remembering' a set of credentials\n implied by the data passed as ``principal`` and ``*kw`` using the\n current :term:`authentication policy`. Common usage might look\n like so within the body of a view function (``response`` is\n assumed to be a :term:`WebOb` -style :term:`response` object\n computed previously by the view code)::\n\n from pyramid.security import remember\n headers = remember(request, 'chrism', password='123', max_age='86400')\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence. If used, the composition and\n meaning of ``**kw`` must be agreed upon by the calling code and\n the effective authentication policy.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return []\n else:\n return policy.remember(request, principal, **kw)\n\ndef forget(request):\n \"\"\" Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'forgetting' the set of credentials\n possessed by the currently authenticated user. 
A common usage\n might look like so within the body of a view function\n (``response`` is assumed to be an :term:`WebOb` -style\n :term:`response` object computed previously by the view code)::\n\n from pyramid.security import forget\n headers = forget(request)\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return []\n else:\n return policy.forget(request)\n\nclass PermitsResult(int):\n def __new__(cls, s, *args):\n inst = int.__new__(cls, cls.boolval)\n inst.s = s\n inst.args = args\n return inst\n\n @property\n def msg(self):\n return self.s % self.args\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass Denied(PermitsResult):\n \"\"\" An instance of ``Denied`` is returned when a security-related\n API or other :app:`Pyramid` code denies an action unrelated to\n an ACL check. It evaluates equal to all boolean false types. It\n has an attribute named ``msg`` describing the circumstances for\n the deny.\"\"\"\n boolval = 0\n\nclass Allowed(PermitsResult):\n \"\"\" An instance of ``Allowed`` is returned when a security-related\n API or other :app:`Pyramid` code allows an action unrelated to\n an ACL check. It evaluates equal to all boolean true types. It\n has an attribute named ``msg`` describing the circumstances for\n the allow.\"\"\"\n boolval = 1\n\nclass ACLPermitsResult(int):\n def __new__(cls, ace, acl, permission, principals, context):\n inst = int.__new__(cls, cls.boolval)\n inst.permission = permission\n inst.ace = ace\n inst.acl = acl\n inst.principals = principals\n inst.context = context\n return inst\n\n @property\n def msg(self):\n s = ('%s permission %r via ACE %r in ACL %r on context %r for '\n 'principals %r')\n return s % (self.__class__.__name__,\n self.permission,\n self.ace,\n self.acl,\n self.context,\n self.principals)\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass ACLDenied(ACLPermitsResult):\n \"\"\" An instance of ``ACLDenied`` represents that a security check made\n explicitly against ACL was denied. It evaluates equal to all boolean\n false types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 0\n\nclass ACLAllowed(ACLPermitsResult):\n \"\"\" An instance of ``ACLAllowed`` represents that a security check made\n explicitly against ACL was allowed. It evaluates equal to all boolean\n true types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. 
The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 1\n\n", "path": "pyramid/security.py"}], "after_files": [{"content": "from zope.interface import providedBy\n\nfrom pyramid.interfaces import (\n IAuthenticationPolicy,\n IAuthorizationPolicy,\n ISecuredView,\n IView,\n IViewClassifier,\n )\n\nfrom pyramid.compat import map_\nfrom pyramid.threadlocal import get_current_registry\n\nEveryone = 'system.Everyone'\nAuthenticated = 'system.Authenticated'\nAllow = 'Allow'\nDeny = 'Deny'\n\nclass AllPermissionsList(object):\n \"\"\" Stand in 'permission list' to represent all permissions \"\"\"\n def __iter__(self):\n return ()\n def __contains__(self, other):\n return True\n def __eq__(self, other):\n return isinstance(other, self.__class__)\n\nALL_PERMISSIONS = AllPermissionsList()\nDENY_ALL = (Deny, Everyone, ALL_PERMISSIONS)\n\nNO_PERMISSION_REQUIRED = '__no_permission_required__'\n\ndef has_permission(permission, context, request):\n \"\"\" Provided a permission (a string or unicode object), a context\n (a :term:`resource` instance) and a request object, return an\n instance of :data:`pyramid.security.Allowed` if the permission\n is granted in this context to the user implied by the\n request. Return an instance of :mod:`pyramid.security.Denied`\n if this permission is not granted in this context to this user.\n This function delegates to the current authentication and\n authorization policies. Return\n :data:`pyramid.security.Allowed` unconditionally if no\n authentication policy has been configured in this application.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n authn_policy = reg.queryUtility(IAuthenticationPolicy)\n if authn_policy is None:\n return Allowed('No authentication policy in use.')\n\n authz_policy = reg.queryUtility(IAuthorizationPolicy)\n if authz_policy is None:\n raise ValueError('Authentication policy registered without '\n 'authorization policy') # should never happen\n principals = authn_policy.effective_principals(request)\n return authz_policy.permits(context, principals, permission)\n\ndef authenticated_userid(request):\n \"\"\" Return the userid of the currently authenticated user or\n ``None`` if there is no :term:`authentication policy` in effect or\n there is no currently authenticated user.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return None\n return policy.authenticated_userid(request)\n\ndef unauthenticated_userid(request):\n \"\"\" Return an object which represents the *claimed* (not verified) user\n id of the credentials present in the request. ``None`` if there is no\n :term:`authentication policy` in effect or there is no user data\n associated with the current request. This differs from\n :func:`~pyramid.security.authenticated_userid`, because the effective\n authentication policy will not ensure that a record associated with the\n userid exists in persistent storage.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return None\n return policy.unauthenticated_userid(request)\n\ndef effective_principals(request):\n \"\"\" Return the list of 'effective' :term:`principal` identifiers\n for the ``request``. This will include the userid of the\n currently authenticated user if a user is currently\n authenticated. 
If no :term:`authentication policy` is in effect,\n this will return an empty sequence.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return [Everyone]\n return policy.effective_principals(request)\n\ndef principals_allowed_by_permission(context, permission):\n \"\"\" Provided a ``context`` (a resource object), and a ``permission``\n (a string or unicode object), if a :term:`authorization policy` is\n in effect, return a sequence of :term:`principal` ids that possess\n the permission in the ``context``. If no authorization policy is\n in effect, this will return a sequence with the single value\n :mod:`pyramid.security.Everyone` (the special principal\n identifier representing all principals).\n\n .. note::\n\n even if an :term:`authorization policy` is in effect,\n some (exotic) authorization policies may not implement the\n required machinery for this function; those will cause a\n :exc:`NotImplementedError` exception to be raised when this\n function is invoked.\n \"\"\"\n reg = get_current_registry()\n policy = reg.queryUtility(IAuthorizationPolicy)\n if policy is None:\n return [Everyone]\n return policy.principals_allowed_by_permission(context, permission)\n\ndef view_execution_permitted(context, request, name=''):\n \"\"\" If the view specified by ``context`` and ``name`` is protected\n by a :term:`permission`, check the permission associated with the\n view using the effective authentication/authorization policies and\n the ``request``. Return a boolean result. If no\n :term:`authorization policy` is in effect, or if the view is not\n protected by a permission, return ``True``. If no view can view found,\n an exception will be raised.\n\n .. versionchanged:: 1.4a4\n An exception is raised if no view is found.\n\n \"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n provides = [IViewClassifier] + map_(providedBy, (request, context))\n view = reg.adapters.lookup(provides, ISecuredView, name=name)\n if view is None:\n view = reg.adapters.lookup(provides, IView, name=name)\n if view is None:\n raise TypeError('No registered view satisfies the constraints. '\n 'It would not make sense to claim that this view '\n '\"is\" or \"is not\" permitted.')\n return Allowed(\n 'Allowed: view name %r in context %r (no permission defined)' %\n (name, context))\n return view.__permitted__(context, request)\n\ndef remember(request, principal, **kw):\n \"\"\" Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'remembering' a set of credentials\n implied by the data passed as ``principal`` and ``*kw`` using the\n current :term:`authentication policy`. Common usage might look\n like so within the body of a view function (``response`` is\n assumed to be a :term:`WebOb` -style :term:`response` object\n computed previously by the view code)::\n\n from pyramid.security import remember\n headers = remember(request, 'chrism', password='123', max_age='86400')\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence. 
If used, the composition and\n meaning of ``**kw`` must be agreed upon by the calling code and\n the effective authentication policy.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return []\n else:\n return policy.remember(request, principal, **kw)\n\ndef forget(request):\n \"\"\" Return a sequence of header tuples (e.g. ``[('Set-Cookie',\n 'foo=abc')]``) suitable for 'forgetting' the set of credentials\n possessed by the currently authenticated user. A common usage\n might look like so within the body of a view function\n (``response`` is assumed to be an :term:`WebOb` -style\n :term:`response` object computed previously by the view code)::\n\n from pyramid.security import forget\n headers = forget(request)\n response.headerlist.extend(headers)\n return response\n\n If no :term:`authentication policy` is in use, this function will\n always return an empty sequence.\"\"\"\n try:\n reg = request.registry\n except AttributeError:\n reg = get_current_registry() # b/c\n policy = reg.queryUtility(IAuthenticationPolicy)\n if policy is None:\n return []\n else:\n return policy.forget(request)\n\nclass PermitsResult(int):\n def __new__(cls, s, *args):\n inst = int.__new__(cls, cls.boolval)\n inst.s = s\n inst.args = args\n return inst\n\n @property\n def msg(self):\n return self.s % self.args\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass Denied(PermitsResult):\n \"\"\" An instance of ``Denied`` is returned when a security-related\n API or other :app:`Pyramid` code denies an action unrelated to\n an ACL check. It evaluates equal to all boolean false types. It\n has an attribute named ``msg`` describing the circumstances for\n the deny.\"\"\"\n boolval = 0\n\nclass Allowed(PermitsResult):\n \"\"\" An instance of ``Allowed`` is returned when a security-related\n API or other :app:`Pyramid` code allows an action unrelated to\n an ACL check. It evaluates equal to all boolean true types. It\n has an attribute named ``msg`` describing the circumstances for\n the allow.\"\"\"\n boolval = 1\n\nclass ACLPermitsResult(int):\n def __new__(cls, ace, acl, permission, principals, context):\n inst = int.__new__(cls, cls.boolval)\n inst.permission = permission\n inst.ace = ace\n inst.acl = acl\n inst.principals = principals\n inst.context = context\n return inst\n\n @property\n def msg(self):\n s = ('%s permission %r via ACE %r in ACL %r on context %r for '\n 'principals %r')\n return s % (self.__class__.__name__,\n self.permission,\n self.ace,\n self.acl,\n self.context,\n self.principals)\n\n def __str__(self):\n return self.msg\n\n def __repr__(self):\n return '<%s instance at %s with msg %r>' % (self.__class__.__name__,\n id(self),\n self.msg)\n\nclass ACLDenied(ACLPermitsResult):\n \"\"\" An instance of ``ACLDenied`` represents that a security check made\n explicitly against ACL was denied. It evaluates equal to all boolean\n false types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. 
The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 0\n\nclass ACLAllowed(ACLPermitsResult):\n \"\"\" An instance of ``ACLAllowed`` represents that a security check made\n explicitly against ACL was allowed. It evaluates equal to all boolean\n true types. It also has the following attributes: ``acl``, ``ace``,\n ``permission``, ``principals``, and ``context``. These attributes\n indicate the security values involved in the request. Its __str__ method\n prints a summary of these attributes for debugging purposes. The same\n summary is available as the ``msg`` attribute.\"\"\"\n boolval = 1\n\n", "path": "pyramid/security.py"}]} | 3,710 | 356 |
gh_patches_debug_26086 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Validate docs dir before writing custom js
If the user puts an invalid docs dir, we fail with a generic error; we should raise a more specific one.
https://github.com/rtfd/readthedocs.org/blob/44951fbf8fe1fe12fa9f99f3075f87bd77188525/readthedocs/doc_builder/backends/mkdocs.py#L175-L176
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/doc_builder/backends/mkdocs.py`
Content:
```
1 """
2 MkDocs_ backend for building docs.
3
4 .. _MkDocs: http://www.mkdocs.org/
5 """
6
7 import json
8 import logging
9 import os
10
11 import yaml
12 from django.conf import settings
13 from django.template import loader as template_loader
14
15 from readthedocs.doc_builder.base import BaseBuilder
16 from readthedocs.doc_builder.exceptions import MkDocsYAMLParseError
17 from readthedocs.projects.models import Feature
18
19
20 log = logging.getLogger(__name__)
21
22
23 def get_absolute_static_url():
24 """
25 Get the fully qualified static URL from settings.
26
27 Mkdocs needs a full domain because it tries to link to local files.
28 """
29 static_url = settings.STATIC_URL
30
31 if not static_url.startswith('http'):
32 domain = getattr(settings, 'PRODUCTION_DOMAIN')
33 static_url = 'http://{}{}'.format(domain, static_url)
34
35 return static_url
36
37
38 class BaseMkdocs(BaseBuilder):
39
40 """Mkdocs builder."""
41
42 # The default theme for mkdocs is the 'mkdocs' theme
43 DEFAULT_THEME_NAME = 'mkdocs'
44
45 def __init__(self, *args, **kwargs):
46 super().__init__(*args, **kwargs)
47 self.yaml_file = self.get_yaml_config()
48 self.old_artifact_path = os.path.join(
49 os.path.dirname(self.yaml_file),
50 self.build_dir,
51 )
52 self.root_path = self.version.project.checkout_path(self.version.slug)
53
54 # README: historically, the default theme was ``readthedocs`` but in
55 # https://github.com/rtfd/readthedocs.org/pull/4556 we change it to
56 # ``mkdocs`` to maintain the same behavior in Read the Docs than
57 # building locally. Although, we can't apply this into the Corporate
58 # site. To keep the same default theme there, we created a Feature flag
59 # for these project that were building with MkDocs in the Corporate
60 # site.
61 if self.project.has_feature(Feature.MKDOCS_THEME_RTD):
62 self.DEFAULT_THEME_NAME = 'readthedocs'
63 log.warning(
64 'Project using readthedocs theme as default for MkDocs: slug=%s',
65 self.project.slug,
66 )
67 else:
68 self.DEFAULT_THEME_NAME = 'mkdocs'
69
70 def get_yaml_config(self):
71 """Find the ``mkdocs.yml`` file in the project root."""
72 mkdoc_path = self.config.mkdocs.configuration
73 if not mkdoc_path:
74 mkdoc_path = os.path.join(
75 self.project.checkout_path(self.version.slug),
76 'mkdocs.yml',
77 )
78 return mkdoc_path
79
80 def load_yaml_config(self):
81 """
82 Load a YAML config.
83
84 :raises: ``MkDocsYAMLParseError`` if failed due to syntax errors.
85 """
86 try:
87 return yaml.safe_load(open(self.yaml_file, 'r'),)
88 except IOError:
89 return {
90 'site_name': self.version.project.name,
91 }
92 except yaml.YAMLError as exc:
93 note = ''
94 if hasattr(exc, 'problem_mark'):
95 mark = exc.problem_mark
96 note = ' (line %d, column %d)' % (
97 mark.line + 1,
98 mark.column + 1,
99 )
100 raise MkDocsYAMLParseError(
101 'Your mkdocs.yml could not be loaded, '
102 'possibly due to a syntax error{note}'.format(note=note),
103 )
104
105 def append_conf(self, **__):
106 """
107 Set mkdocs config values.
108
109 :raises: ``MkDocsYAMLParseError`` if failed due to known type errors
110 (i.e. expecting a list and a string is found).
111 """
112 user_config = self.load_yaml_config()
113
114 # Handle custom docs dirs
115 user_docs_dir = user_config.get('docs_dir')
116 if not isinstance(user_docs_dir, (type(None), str)):
117 raise MkDocsYAMLParseError(
118 MkDocsYAMLParseError.INVALID_DOCS_DIR_CONFIG,
119 )
120
121 docs_dir = self.docs_dir(docs_dir=user_docs_dir)
122 self.create_index(extension='md')
123 user_config['docs_dir'] = docs_dir
124
125 # Set mkdocs config values
126 static_url = get_absolute_static_url()
127
128 for config in ('extra_css', 'extra_javascript'):
129 user_value = user_config.get(config, [])
130 if not isinstance(user_value, list):
131 raise MkDocsYAMLParseError(
132 MkDocsYAMLParseError.INVALID_EXTRA_CONFIG.format(
133 config=config,
134 ),
135 )
136
137 extra_javascript_list = [
138 'readthedocs-data.js',
139 '%score/js/readthedocs-doc-embed.js' % static_url,
140 '%sjavascript/readthedocs-analytics.js' % static_url,
141 ]
142 extra_css_list = [
143 '%scss/badge_only.css' % static_url,
144 '%scss/readthedocs-doc-embed.css' % static_url,
145 ]
146
147 # Only add static file if the files are not already in the list
148 user_config.setdefault('extra_javascript', []).extend(
149 [js for js in extra_javascript_list if js not in user_config.get(
150 'extra_javascript')]
151 )
152 user_config.setdefault('extra_css', []).extend(
153 [css for css in extra_css_list if css not in user_config.get(
154 'extra_css')]
155 )
156
157 # The docs path is relative to the location
158 # of the mkdocs configuration file.
159 docs_path = os.path.join(
160 os.path.dirname(self.yaml_file),
161 docs_dir,
162 )
163
164 # RTD javascript writing
165 rtd_data = self.generate_rtd_data(
166 docs_dir=os.path.relpath(docs_path, self.root_path),
167 mkdocs_config=user_config,
168 )
169 with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:
170 f.write(rtd_data)
171
172 # Use Read the Docs' analytics setup rather than mkdocs'
173 # This supports using RTD's privacy improvements around analytics
174 user_config['google_analytics'] = None
175
176 # README: make MkDocs to use ``readthedocs`` theme as default if the
177 # user didn't specify a specific theme manually
178 if self.project.has_feature(Feature.MKDOCS_THEME_RTD):
179 if 'theme' not in user_config:
180 # mkdocs<0.17 syntax
181 user_config['theme'] = self.DEFAULT_THEME_NAME
182
183 # Write the modified mkdocs configuration
184 yaml.safe_dump(
185 user_config,
186 open(self.yaml_file, 'w'),
187 )
188
189 # Write the mkdocs.yml to the build logs
190 self.run(
191 'cat',
192 os.path.relpath(self.yaml_file, self.root_path),
193 cwd=self.root_path,
194 )
195
196 def generate_rtd_data(self, docs_dir, mkdocs_config):
197 """Generate template properties and render readthedocs-data.js."""
198 # Use the analytics code from mkdocs.yml
199 # if it isn't set already by Read the Docs,
200 analytics_code = self.version.project.analytics_code
201 if not analytics_code and mkdocs_config.get('google_analytics'):
202 # http://www.mkdocs.org/user-guide/configuration/#google_analytics
203 analytics_code = mkdocs_config['google_analytics'][0]
204
205 # Will be available in the JavaScript as READTHEDOCS_DATA.
206 readthedocs_data = {
207 'project': self.version.project.slug,
208 'version': self.version.slug,
209 'language': self.version.project.language,
210 'programming_language': self.version.project.programming_language,
211 'page': None,
212 'theme': self.get_theme_name(mkdocs_config),
213 'builder': 'mkdocs',
214 'docroot': docs_dir,
215 'source_suffix': '.md',
216 'api_host': getattr(
217 settings,
218 'PUBLIC_API_URL',
219 'https://readthedocs.org',
220 ),
221 'ad_free': not self.project.show_advertising,
222 'commit': self.version.project.vcs_repo(self.version.slug).commit,
223 'global_analytics_code': getattr(
224 settings,
225 'GLOBAL_ANALYTICS_CODE',
226 'UA-17997319-1',
227 ),
228 'user_analytics_code': analytics_code,
229 }
230 data_json = json.dumps(readthedocs_data, indent=4)
231 data_ctx = {
232 'data_json': data_json,
233 'current_version': readthedocs_data['version'],
234 'slug': readthedocs_data['project'],
235 'html_theme': readthedocs_data['theme'],
236 'pagename': None,
237 }
238 tmpl = template_loader.get_template('doc_builder/data.js.tmpl')
239 return tmpl.render(data_ctx)
240
241 def build(self):
242 checkout_path = self.project.checkout_path(self.version.slug)
243 build_command = [
244 self.python_env.venv_bin(filename='python'),
245 '-m',
246 'mkdocs',
247 self.builder,
248 '--clean',
249 '--site-dir',
250 self.build_dir,
251 '--config-file',
252 self.yaml_file,
253 ]
254 if self.config.mkdocs.fail_on_warning:
255 build_command.append('--strict')
256 cmd_ret = self.run(
257 *build_command, cwd=checkout_path,
258 bin_path=self.python_env.venv_bin()
259 )
260 return cmd_ret.successful
261
262 def get_theme_name(self, mkdocs_config):
263 """
264 Get the theme configuration in the mkdocs_config.
265
266 In v0.17.0, the theme configuration switched
267 from two separate configs (both optional) to a nested directive.
268
269 :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164
270 :returns: the name of the theme RTD will use
271 """
272 theme_setting = mkdocs_config.get('theme')
273 if isinstance(theme_setting, dict):
274 # Full nested theme config (the new configuration)
275 return theme_setting.get('name') or self.DEFAULT_THEME_NAME
276
277 if theme_setting:
278 # A string which is the name of the theme
279 return theme_setting
280
281 theme_dir = mkdocs_config.get('theme_dir')
282 if theme_dir:
283 # Use the name of the directory in this project's custom theme directory
284 return theme_dir.rstrip('/').split('/')[-1]
285
286 return self.DEFAULT_THEME_NAME
287
288
289 class MkdocsHTML(BaseMkdocs):
290 type = 'mkdocs'
291 builder = 'build'
292 build_dir = '_build/html'
293
294
295 class MkdocsJSON(BaseMkdocs):
296 type = 'mkdocs_json'
297 builder = 'json'
298 build_dir = '_build/json'
299
```
Path: `readthedocs/doc_builder/exceptions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Exceptions raised when building documentation."""
4
5 from django.utils.translation import ugettext_noop
6
7
8 class BuildEnvironmentException(Exception):
9 message = None
10 status_code = None
11
12 def __init__(self, message=None, **kwargs):
13 self.status_code = kwargs.pop(
14 'status_code',
15 None,
16 ) or self.status_code or 1
17 message = message or self.get_default_message()
18 super().__init__(message, **kwargs)
19
20 def get_default_message(self):
21 return self.message
22
23
24 class BuildEnvironmentError(BuildEnvironmentException):
25 GENERIC_WITH_BUILD_ID = ugettext_noop(
26 'There was a problem with Read the Docs while building your documentation. '
27 'Please try again later. '
28 'However, if this problem persists, '
29 'please report this to us with your build id ({build_id}).',
30 )
31
32
33 class BuildEnvironmentCreationFailed(BuildEnvironmentError):
34 message = ugettext_noop('Build environment creation failed')
35
36
37 class VersionLockedError(BuildEnvironmentError):
38 message = ugettext_noop('Version locked, retrying in 5 minutes.')
39 status_code = 423
40
41
42 class ProjectBuildsSkippedError(BuildEnvironmentError):
43 message = ugettext_noop('Builds for this project are temporarily disabled')
44
45
46 class YAMLParseError(BuildEnvironmentError):
47 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
48 'Problem in your project\'s configuration. {exception}',
49 )
50
51
52 class BuildTimeoutError(BuildEnvironmentError):
53 message = ugettext_noop('Build exited due to time out')
54
55
56 class BuildEnvironmentWarning(BuildEnvironmentException):
57 pass
58
59
60 class MkDocsYAMLParseError(BuildEnvironmentError):
61 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(
62 'Problem parsing MkDocs YAML configuration. {exception}',
63 )
64
65 INVALID_DOCS_DIR_CONFIG = ugettext_noop(
66 'The "docs_dir" config from your MkDocs YAML config file has to be a '
67 'string with relative or absolute path.',
68 )
69
70 INVALID_EXTRA_CONFIG = ugettext_noop(
71 'The "{config}" config from your MkDocs YAML config file has to be a '
72 'a list of relative paths.',
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py
--- a/readthedocs/doc_builder/backends/mkdocs.py
+++ b/readthedocs/doc_builder/backends/mkdocs.py
@@ -119,6 +119,7 @@
)
docs_dir = self.docs_dir(docs_dir=user_docs_dir)
+
self.create_index(extension='md')
user_config['docs_dir'] = docs_dir
@@ -161,6 +162,12 @@
docs_dir,
)
+ # if user puts an invalid `docs_dir` path raise an Exception
+ if not os.path.exists(docs_path):
+ raise MkDocsYAMLParseError(
+ MkDocsYAMLParseError.INVALID_DOCS_DIR_PATH,
+ )
+
# RTD javascript writing
rtd_data = self.generate_rtd_data(
docs_dir=os.path.relpath(docs_path, self.root_path),
diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py
--- a/readthedocs/doc_builder/exceptions.py
+++ b/readthedocs/doc_builder/exceptions.py
@@ -67,6 +67,11 @@
'string with relative or absolute path.',
)
+ INVALID_DOCS_DIR_PATH = ugettext_noop(
+ 'The "docs_dir" config from your MkDocs YAML config file does not '
+ 'contain a valid path.',
+ )
+
INVALID_EXTRA_CONFIG = ugettext_noop(
'The "{config}" config from your MkDocs YAML config file has to be a '
'a list of relative paths.',
| {"golden_diff": "diff --git a/readthedocs/doc_builder/backends/mkdocs.py b/readthedocs/doc_builder/backends/mkdocs.py\n--- a/readthedocs/doc_builder/backends/mkdocs.py\n+++ b/readthedocs/doc_builder/backends/mkdocs.py\n@@ -119,6 +119,7 @@\n )\n \n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n+\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n \n@@ -161,6 +162,12 @@\n docs_dir,\n )\n \n+ # if user puts an invalid `docs_dir` path raise an Exception\n+ if not os.path.exists(docs_path):\n+ raise MkDocsYAMLParseError(\n+ MkDocsYAMLParseError.INVALID_DOCS_DIR_PATH,\n+ )\n+\n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n docs_dir=os.path.relpath(docs_path, self.root_path),\ndiff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py\n--- a/readthedocs/doc_builder/exceptions.py\n+++ b/readthedocs/doc_builder/exceptions.py\n@@ -67,6 +67,11 @@\n 'string with relative or absolute path.',\n )\n \n+ INVALID_DOCS_DIR_PATH = ugettext_noop(\n+ 'The \"docs_dir\" config from your MkDocs YAML config file does not '\n+ 'contain a valid path.',\n+ )\n+\n INVALID_EXTRA_CONFIG = ugettext_noop(\n 'The \"{config}\" config from your MkDocs YAML config file has to be a '\n 'a list of relative paths.',\n", "issue": "Validate docs dir before writing custom js\nIf the user puts an invalid docs dir, we fail with a generic error, we should raise a more specific one.\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/44951fbf8fe1fe12fa9f99f3075f87bd77188525/readthedocs/doc_builder/backends/mkdocs.py#L175-L176\n", "before_files": [{"content": "\"\"\"\nMkDocs_ backend for building docs.\n\n.. _MkDocs: http://www.mkdocs.org/\n\"\"\"\n\nimport json\nimport logging\nimport os\n\nimport yaml\nfrom django.conf import settings\nfrom django.template import loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\nfrom readthedocs.doc_builder.exceptions import MkDocsYAMLParseError\nfrom readthedocs.projects.models import Feature\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_absolute_static_url():\n \"\"\"\n Get the fully qualified static URL from settings.\n\n Mkdocs needs a full domain because it tries to link to local files.\n \"\"\"\n static_url = settings.STATIC_URL\n\n if not static_url.startswith('http'):\n domain = getattr(settings, 'PRODUCTION_DOMAIN')\n static_url = 'http://{}{}'.format(domain, static_url)\n\n return static_url\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"Mkdocs builder.\"\"\"\n\n # The default theme for mkdocs is the 'mkdocs' theme\n DEFAULT_THEME_NAME = 'mkdocs'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.yaml_file = self.get_yaml_config()\n self.old_artifact_path = os.path.join(\n os.path.dirname(self.yaml_file),\n self.build_dir,\n )\n self.root_path = self.version.project.checkout_path(self.version.slug)\n\n # README: historically, the default theme was ``readthedocs`` but in\n # https://github.com/rtfd/readthedocs.org/pull/4556 we change it to\n # ``mkdocs`` to maintain the same behavior in Read the Docs than\n # building locally. Although, we can't apply this into the Corporate\n # site. 
To keep the same default theme there, we created a Feature flag\n # for these project that were building with MkDocs in the Corporate\n # site.\n if self.project.has_feature(Feature.MKDOCS_THEME_RTD):\n self.DEFAULT_THEME_NAME = 'readthedocs'\n log.warning(\n 'Project using readthedocs theme as default for MkDocs: slug=%s',\n self.project.slug,\n )\n else:\n self.DEFAULT_THEME_NAME = 'mkdocs'\n\n def get_yaml_config(self):\n \"\"\"Find the ``mkdocs.yml`` file in the project root.\"\"\"\n mkdoc_path = self.config.mkdocs.configuration\n if not mkdoc_path:\n mkdoc_path = os.path.join(\n self.project.checkout_path(self.version.slug),\n 'mkdocs.yml',\n )\n return mkdoc_path\n\n def load_yaml_config(self):\n \"\"\"\n Load a YAML config.\n\n :raises: ``MkDocsYAMLParseError`` if failed due to syntax errors.\n \"\"\"\n try:\n return yaml.safe_load(open(self.yaml_file, 'r'),)\n except IOError:\n return {\n 'site_name': self.version.project.name,\n }\n except yaml.YAMLError as exc:\n note = ''\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark\n note = ' (line %d, column %d)' % (\n mark.line + 1,\n mark.column + 1,\n )\n raise MkDocsYAMLParseError(\n 'Your mkdocs.yml could not be loaded, '\n 'possibly due to a syntax error{note}'.format(note=note),\n )\n\n def append_conf(self, **__):\n \"\"\"\n Set mkdocs config values.\n\n :raises: ``MkDocsYAMLParseError`` if failed due to known type errors\n (i.e. expecting a list and a string is found).\n \"\"\"\n user_config = self.load_yaml_config()\n\n # Handle custom docs dirs\n user_docs_dir = user_config.get('docs_dir')\n if not isinstance(user_docs_dir, (type(None), str)):\n raise MkDocsYAMLParseError(\n MkDocsYAMLParseError.INVALID_DOCS_DIR_CONFIG,\n )\n\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n static_url = get_absolute_static_url()\n\n for config in ('extra_css', 'extra_javascript'):\n user_value = user_config.get(config, [])\n if not isinstance(user_value, list):\n raise MkDocsYAMLParseError(\n MkDocsYAMLParseError.INVALID_EXTRA_CONFIG.format(\n config=config,\n ),\n )\n\n extra_javascript_list = [\n 'readthedocs-data.js',\n '%score/js/readthedocs-doc-embed.js' % static_url,\n '%sjavascript/readthedocs-analytics.js' % static_url,\n ]\n extra_css_list = [\n '%scss/badge_only.css' % static_url,\n '%scss/readthedocs-doc-embed.css' % static_url,\n ]\n\n # Only add static file if the files are not already in the list\n user_config.setdefault('extra_javascript', []).extend(\n [js for js in extra_javascript_list if js not in user_config.get(\n 'extra_javascript')]\n )\n user_config.setdefault('extra_css', []).extend(\n [css for css in extra_css_list if css not in user_config.get(\n 'extra_css')]\n )\n\n # The docs path is relative to the location\n # of the mkdocs configuration file.\n docs_path = os.path.join(\n os.path.dirname(self.yaml_file),\n docs_dir,\n )\n\n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n docs_dir=os.path.relpath(docs_path, self.root_path),\n mkdocs_config=user_config,\n )\n with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:\n f.write(rtd_data)\n\n # Use Read the Docs' analytics setup rather than mkdocs'\n # This supports using RTD's privacy improvements around analytics\n user_config['google_analytics'] = None\n\n # README: make MkDocs to use ``readthedocs`` theme as default if the\n # user didn't specify a specific theme manually\n if 
self.project.has_feature(Feature.MKDOCS_THEME_RTD):\n if 'theme' not in user_config:\n # mkdocs<0.17 syntax\n user_config['theme'] = self.DEFAULT_THEME_NAME\n\n # Write the modified mkdocs configuration\n yaml.safe_dump(\n user_config,\n open(self.yaml_file, 'w'),\n )\n\n # Write the mkdocs.yml to the build logs\n self.run(\n 'cat',\n os.path.relpath(self.yaml_file, self.root_path),\n cwd=self.root_path,\n )\n\n def generate_rtd_data(self, docs_dir, mkdocs_config):\n \"\"\"Generate template properties and render readthedocs-data.js.\"\"\"\n # Use the analytics code from mkdocs.yml\n # if it isn't set already by Read the Docs,\n analytics_code = self.version.project.analytics_code\n if not analytics_code and mkdocs_config.get('google_analytics'):\n # http://www.mkdocs.org/user-guide/configuration/#google_analytics\n analytics_code = mkdocs_config['google_analytics'][0]\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'programming_language': self.version.project.programming_language,\n 'page': None,\n 'theme': self.get_theme_name(mkdocs_config),\n 'builder': 'mkdocs',\n 'docroot': docs_dir,\n 'source_suffix': '.md',\n 'api_host': getattr(\n settings,\n 'PUBLIC_API_URL',\n 'https://readthedocs.org',\n ),\n 'ad_free': not self.project.show_advertising,\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n 'global_analytics_code': getattr(\n settings,\n 'GLOBAL_ANALYTICS_CODE',\n 'UA-17997319-1',\n ),\n 'user_analytics_code': analytics_code,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n tmpl = template_loader.get_template('doc_builder/data.js.tmpl')\n return tmpl.render(data_ctx)\n\n def build(self):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n self.python_env.venv_bin(filename='python'),\n '-m',\n 'mkdocs',\n self.builder,\n '--clean',\n '--site-dir',\n self.build_dir,\n '--config-file',\n self.yaml_file,\n ]\n if self.config.mkdocs.fail_on_warning:\n build_command.append('--strict')\n cmd_ret = self.run(\n *build_command, cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n def get_theme_name(self, mkdocs_config):\n \"\"\"\n Get the theme configuration in the mkdocs_config.\n\n In v0.17.0, the theme configuration switched\n from two separate configs (both optional) to a nested directive.\n\n :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164\n :returns: the name of the theme RTD will use\n \"\"\"\n theme_setting = mkdocs_config.get('theme')\n if isinstance(theme_setting, dict):\n # Full nested theme config (the new configuration)\n return theme_setting.get('name') or self.DEFAULT_THEME_NAME\n\n if theme_setting:\n # A string which is the name of the theme\n return theme_setting\n\n theme_dir = mkdocs_config.get('theme_dir')\n if theme_dir:\n # Use the name of the directory in this project's custom theme directory\n return theme_dir.rstrip('/').split('/')[-1]\n\n return self.DEFAULT_THEME_NAME\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n", "path": 
"readthedocs/doc_builder/backends/mkdocs.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop(\n 'status_code',\n None,\n ) or self.status_code or 1\n message = message or self.get_default_message()\n super().__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem in your project\\'s configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n\n INVALID_DOCS_DIR_CONFIG = ugettext_noop(\n 'The \"docs_dir\" config from your MkDocs YAML config file has to be a '\n 'string with relative or absolute path.',\n )\n\n INVALID_EXTRA_CONFIG = ugettext_noop(\n 'The \"{config}\" config from your MkDocs YAML config file has to be a '\n 'a list of relative paths.',\n )\n", "path": "readthedocs/doc_builder/exceptions.py"}], "after_files": [{"content": "\"\"\"\nMkDocs_ backend for building docs.\n\n.. 
_MkDocs: http://www.mkdocs.org/\n\"\"\"\n\nimport json\nimport logging\nimport os\n\nimport yaml\nfrom django.conf import settings\nfrom django.template import loader as template_loader\n\nfrom readthedocs.doc_builder.base import BaseBuilder\nfrom readthedocs.doc_builder.exceptions import MkDocsYAMLParseError\nfrom readthedocs.projects.models import Feature\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_absolute_static_url():\n \"\"\"\n Get the fully qualified static URL from settings.\n\n Mkdocs needs a full domain because it tries to link to local files.\n \"\"\"\n static_url = settings.STATIC_URL\n\n if not static_url.startswith('http'):\n domain = getattr(settings, 'PRODUCTION_DOMAIN')\n static_url = 'http://{}{}'.format(domain, static_url)\n\n return static_url\n\n\nclass BaseMkdocs(BaseBuilder):\n\n \"\"\"Mkdocs builder.\"\"\"\n\n # The default theme for mkdocs is the 'mkdocs' theme\n DEFAULT_THEME_NAME = 'mkdocs'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.yaml_file = self.get_yaml_config()\n self.old_artifact_path = os.path.join(\n os.path.dirname(self.yaml_file),\n self.build_dir,\n )\n self.root_path = self.version.project.checkout_path(self.version.slug)\n\n # README: historically, the default theme was ``readthedocs`` but in\n # https://github.com/rtfd/readthedocs.org/pull/4556 we change it to\n # ``mkdocs`` to maintain the same behavior in Read the Docs than\n # building locally. Although, we can't apply this into the Corporate\n # site. To keep the same default theme there, we created a Feature flag\n # for these project that were building with MkDocs in the Corporate\n # site.\n if self.project.has_feature(Feature.MKDOCS_THEME_RTD):\n self.DEFAULT_THEME_NAME = 'readthedocs'\n log.warning(\n 'Project using readthedocs theme as default for MkDocs: slug=%s',\n self.project.slug,\n )\n else:\n self.DEFAULT_THEME_NAME = 'mkdocs'\n\n def get_yaml_config(self):\n \"\"\"Find the ``mkdocs.yml`` file in the project root.\"\"\"\n mkdoc_path = self.config.mkdocs.configuration\n if not mkdoc_path:\n mkdoc_path = os.path.join(\n self.project.checkout_path(self.version.slug),\n 'mkdocs.yml',\n )\n return mkdoc_path\n\n def load_yaml_config(self):\n \"\"\"\n Load a YAML config.\n\n :raises: ``MkDocsYAMLParseError`` if failed due to syntax errors.\n \"\"\"\n try:\n return yaml.safe_load(open(self.yaml_file, 'r'),)\n except IOError:\n return {\n 'site_name': self.version.project.name,\n }\n except yaml.YAMLError as exc:\n note = ''\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark\n note = ' (line %d, column %d)' % (\n mark.line + 1,\n mark.column + 1,\n )\n raise MkDocsYAMLParseError(\n 'Your mkdocs.yml could not be loaded, '\n 'possibly due to a syntax error{note}'.format(note=note),\n )\n\n def append_conf(self, **__):\n \"\"\"\n Set mkdocs config values.\n\n :raises: ``MkDocsYAMLParseError`` if failed due to known type errors\n (i.e. 
expecting a list and a string is found).\n \"\"\"\n user_config = self.load_yaml_config()\n\n # Handle custom docs dirs\n user_docs_dir = user_config.get('docs_dir')\n if not isinstance(user_docs_dir, (type(None), str)):\n raise MkDocsYAMLParseError(\n MkDocsYAMLParseError.INVALID_DOCS_DIR_CONFIG,\n )\n\n docs_dir = self.docs_dir(docs_dir=user_docs_dir)\n\n self.create_index(extension='md')\n user_config['docs_dir'] = docs_dir\n\n # Set mkdocs config values\n static_url = get_absolute_static_url()\n\n for config in ('extra_css', 'extra_javascript'):\n user_value = user_config.get(config, [])\n if not isinstance(user_value, list):\n raise MkDocsYAMLParseError(\n MkDocsYAMLParseError.INVALID_EXTRA_CONFIG.format(\n config=config,\n ),\n )\n\n extra_javascript_list = [\n 'readthedocs-data.js',\n '%score/js/readthedocs-doc-embed.js' % static_url,\n '%sjavascript/readthedocs-analytics.js' % static_url,\n ]\n extra_css_list = [\n '%scss/badge_only.css' % static_url,\n '%scss/readthedocs-doc-embed.css' % static_url,\n ]\n\n # Only add static file if the files are not already in the list\n user_config.setdefault('extra_javascript', []).extend(\n [js for js in extra_javascript_list if js not in user_config.get(\n 'extra_javascript')]\n )\n user_config.setdefault('extra_css', []).extend(\n [css for css in extra_css_list if css not in user_config.get(\n 'extra_css')]\n )\n\n # The docs path is relative to the location\n # of the mkdocs configuration file.\n docs_path = os.path.join(\n os.path.dirname(self.yaml_file),\n docs_dir,\n )\n\n # if user puts an invalid `docs_dir` path raise an Exception\n if not os.path.exists(docs_path):\n raise MkDocsYAMLParseError(\n MkDocsYAMLParseError.INVALID_DOCS_DIR_PATH,\n )\n\n # RTD javascript writing\n rtd_data = self.generate_rtd_data(\n docs_dir=os.path.relpath(docs_path, self.root_path),\n mkdocs_config=user_config,\n )\n with open(os.path.join(docs_path, 'readthedocs-data.js'), 'w') as f:\n f.write(rtd_data)\n\n # Use Read the Docs' analytics setup rather than mkdocs'\n # This supports using RTD's privacy improvements around analytics\n user_config['google_analytics'] = None\n\n # README: make MkDocs to use ``readthedocs`` theme as default if the\n # user didn't specify a specific theme manually\n if self.project.has_feature(Feature.MKDOCS_THEME_RTD):\n if 'theme' not in user_config:\n # mkdocs<0.17 syntax\n user_config['theme'] = self.DEFAULT_THEME_NAME\n\n # Write the modified mkdocs configuration\n yaml.safe_dump(\n user_config,\n open(self.yaml_file, 'w'),\n )\n\n # Write the mkdocs.yml to the build logs\n self.run(\n 'cat',\n os.path.relpath(self.yaml_file, self.root_path),\n cwd=self.root_path,\n )\n\n def generate_rtd_data(self, docs_dir, mkdocs_config):\n \"\"\"Generate template properties and render readthedocs-data.js.\"\"\"\n # Use the analytics code from mkdocs.yml\n # if it isn't set already by Read the Docs,\n analytics_code = self.version.project.analytics_code\n if not analytics_code and mkdocs_config.get('google_analytics'):\n # http://www.mkdocs.org/user-guide/configuration/#google_analytics\n analytics_code = mkdocs_config['google_analytics'][0]\n\n # Will be available in the JavaScript as READTHEDOCS_DATA.\n readthedocs_data = {\n 'project': self.version.project.slug,\n 'version': self.version.slug,\n 'language': self.version.project.language,\n 'programming_language': self.version.project.programming_language,\n 'page': None,\n 'theme': self.get_theme_name(mkdocs_config),\n 'builder': 'mkdocs',\n 'docroot': docs_dir,\n 'source_suffix': 
'.md',\n 'api_host': getattr(\n settings,\n 'PUBLIC_API_URL',\n 'https://readthedocs.org',\n ),\n 'ad_free': not self.project.show_advertising,\n 'commit': self.version.project.vcs_repo(self.version.slug).commit,\n 'global_analytics_code': getattr(\n settings,\n 'GLOBAL_ANALYTICS_CODE',\n 'UA-17997319-1',\n ),\n 'user_analytics_code': analytics_code,\n }\n data_json = json.dumps(readthedocs_data, indent=4)\n data_ctx = {\n 'data_json': data_json,\n 'current_version': readthedocs_data['version'],\n 'slug': readthedocs_data['project'],\n 'html_theme': readthedocs_data['theme'],\n 'pagename': None,\n }\n tmpl = template_loader.get_template('doc_builder/data.js.tmpl')\n return tmpl.render(data_ctx)\n\n def build(self):\n checkout_path = self.project.checkout_path(self.version.slug)\n build_command = [\n self.python_env.venv_bin(filename='python'),\n '-m',\n 'mkdocs',\n self.builder,\n '--clean',\n '--site-dir',\n self.build_dir,\n '--config-file',\n self.yaml_file,\n ]\n if self.config.mkdocs.fail_on_warning:\n build_command.append('--strict')\n cmd_ret = self.run(\n *build_command, cwd=checkout_path,\n bin_path=self.python_env.venv_bin()\n )\n return cmd_ret.successful\n\n def get_theme_name(self, mkdocs_config):\n \"\"\"\n Get the theme configuration in the mkdocs_config.\n\n In v0.17.0, the theme configuration switched\n from two separate configs (both optional) to a nested directive.\n\n :see: http://www.mkdocs.org/about/release-notes/#theme-customization-1164\n :returns: the name of the theme RTD will use\n \"\"\"\n theme_setting = mkdocs_config.get('theme')\n if isinstance(theme_setting, dict):\n # Full nested theme config (the new configuration)\n return theme_setting.get('name') or self.DEFAULT_THEME_NAME\n\n if theme_setting:\n # A string which is the name of the theme\n return theme_setting\n\n theme_dir = mkdocs_config.get('theme_dir')\n if theme_dir:\n # Use the name of the directory in this project's custom theme directory\n return theme_dir.rstrip('/').split('/')[-1]\n\n return self.DEFAULT_THEME_NAME\n\n\nclass MkdocsHTML(BaseMkdocs):\n type = 'mkdocs'\n builder = 'build'\n build_dir = '_build/html'\n\n\nclass MkdocsJSON(BaseMkdocs):\n type = 'mkdocs_json'\n builder = 'json'\n build_dir = '_build/json'\n", "path": "readthedocs/doc_builder/backends/mkdocs.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop(\n 'status_code',\n None,\n ) or self.status_code or 1\n message = message or self.get_default_message()\n super().__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. 
'\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem in your project\\'s configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n\n INVALID_DOCS_DIR_CONFIG = ugettext_noop(\n 'The \"docs_dir\" config from your MkDocs YAML config file has to be a '\n 'string with relative or absolute path.',\n )\n\n INVALID_DOCS_DIR_PATH = ugettext_noop(\n 'The \"docs_dir\" config from your MkDocs YAML config file does not '\n 'contain a valid path.',\n )\n\n INVALID_EXTRA_CONFIG = ugettext_noop(\n 'The \"{config}\" config from your MkDocs YAML config file has to be a '\n 'a list of relative paths.',\n )\n", "path": "readthedocs/doc_builder/exceptions.py"}]} | 4,077 | 367 |
gh_patches_debug_4187 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
W3CBaggagePropagator lowercasing keys
`opentelemetry.baggage.propagation.W3CBaggagePropagator` is lowercasing keys in `extract`; as far as I can tell, [the W3C baggage spec](https://w3c.github.io/baggage/) does not define this behavior.
I also confirmed the go implementation does not lowercase keys: https://go.dev/play/p/9H4HKKmSRMA
I think just removing the `.lower()` here should fix it: https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L96
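
For illustration, a minimal sketch of that one-line change inside `extract` (the surrounding loop stays as-is; this mirrors the file excerpt below rather than being a tested patch):

```python
# keep the caller's original casing instead of lowercasing the key
name = unquote_plus(name).strip()   # was: unquote_plus(name).strip().lower()
value = unquote_plus(value).strip()
```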
**Steps to reproduce**
```python
from opentelemetry import baggage
from opentelemetry.baggage.propagation import W3CBaggagePropagator
carrier = {}
propagator = W3CBaggagePropagator()
ctx = baggage.set_baggage('userId', '1')
print(ctx)
propagator.inject(carrier, ctx)
print(carrier)
print(propagator.extract(carrier))
```
**What is the expected behavior?**
```
{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}
{'baggage': 'userId=1'}
{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}
```
**What is the actual behavior?**
```
{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}
{'baggage': 'userId=1'}
{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userid': '1'}} # note lowercased key
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from logging import getLogger
16 from re import split
17 from typing import Iterable, List, Mapping, Optional, Set
18 from urllib.parse import quote_plus, unquote_plus
19
20 from opentelemetry.baggage import _is_valid_pair, get_all, set_baggage
21 from opentelemetry.context import get_current
22 from opentelemetry.context.context import Context
23 from opentelemetry.propagators import textmap
24 from opentelemetry.util.re import _DELIMITER_PATTERN
25
26 _logger = getLogger(__name__)
27
28
29 class W3CBaggagePropagator(textmap.TextMapPropagator):
30 """Extracts and injects Baggage which is used to annotate telemetry."""
31
32 _MAX_HEADER_LENGTH = 8192
33 _MAX_PAIR_LENGTH = 4096
34 _MAX_PAIRS = 180
35 _BAGGAGE_HEADER_NAME = "baggage"
36
37 def extract(
38 self,
39 carrier: textmap.CarrierT,
40 context: Optional[Context] = None,
41 getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
42 ) -> Context:
43 """Extract Baggage from the carrier.
44
45 See
46 `opentelemetry.propagators.textmap.TextMapPropagator.extract`
47 """
48
49 if context is None:
50 context = get_current()
51
52 header = _extract_first_element(
53 getter.get(carrier, self._BAGGAGE_HEADER_NAME)
54 )
55
56 if not header:
57 return context
58
59 if len(header) > self._MAX_HEADER_LENGTH:
60 _logger.warning(
61 "Baggage header `%s` exceeded the maximum number of bytes per baggage-string",
62 header,
63 )
64 return context
65
66 baggage_entries: List[str] = split(_DELIMITER_PATTERN, header)
67 total_baggage_entries = self._MAX_PAIRS
68
69 if len(baggage_entries) > self._MAX_PAIRS:
70 _logger.warning(
71 "Baggage header `%s` exceeded the maximum number of list-members",
72 header,
73 )
74
75 for entry in baggage_entries:
76 if len(entry) > self._MAX_PAIR_LENGTH:
77 _logger.warning(
78 "Baggage entry `%s` exceeded the maximum number of bytes per list-member",
79 entry,
80 )
81 continue
82 if not entry: # empty string
83 continue
84 try:
85 name, value = entry.split("=", 1)
86 except Exception: # pylint: disable=broad-except
87 _logger.warning(
88 "Baggage list-member `%s` doesn't match the format", entry
89 )
90 continue
91
92 if not _is_valid_pair(name, value):
93 _logger.warning("Invalid baggage entry: `%s`", entry)
94 continue
95
96 name = unquote_plus(name).strip().lower()
97 value = unquote_plus(value).strip()
98
99 context = set_baggage(
100 name,
101 value,
102 context=context,
103 )
104 total_baggage_entries -= 1
105 if total_baggage_entries == 0:
106 break
107
108 return context
109
110 def inject(
111 self,
112 carrier: textmap.CarrierT,
113 context: Optional[Context] = None,
114 setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
115 ) -> None:
116 """Injects Baggage into the carrier.
117
118 See
119 `opentelemetry.propagators.textmap.TextMapPropagator.inject`
120 """
121 baggage_entries = get_all(context=context)
122 if not baggage_entries:
123 return
124
125 baggage_string = _format_baggage(baggage_entries)
126 setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
127
128 @property
129 def fields(self) -> Set[str]:
130 """Returns a set with the fields set in `inject`."""
131 return {self._BAGGAGE_HEADER_NAME}
132
133
134 def _format_baggage(baggage_entries: Mapping[str, object]) -> str:
135 return ",".join(
136 quote_plus(str(key)) + "=" + quote_plus(str(value))
137 for key, value in baggage_entries.items()
138 )
139
140
141 def _extract_first_element(
142 items: Optional[Iterable[textmap.CarrierT]],
143 ) -> Optional[textmap.CarrierT]:
144 if items is None:
145 return None
146 return next(iter(items), None)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -93,7 +93,7 @@
_logger.warning("Invalid baggage entry: `%s`", entry)
continue
- name = unquote_plus(name).strip().lower()
+ name = unquote_plus(name).strip()
value = unquote_plus(value).strip()
context = set_baggage(
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -93,7 +93,7 @@\n _logger.warning(\"Invalid baggage entry: `%s`\", entry)\n continue\n \n- name = unquote_plus(name).strip().lower()\n+ name = unquote_plus(name).strip()\n value = unquote_plus(value).strip()\n \n context = set_baggage(\n", "issue": "W3CBaggagePropagator lowercasing keys\n`opentelemetry.baggage.propagation.W3CBaggagePropagator` is lowercasing keys in `extract`, as far as I can tell [the W2C baggage spec](https://w3c.github.io/baggage/) does not define this behavior.\r\n\r\nI also confirmed the go implementation does not lowercase keys: https://go.dev/play/p/9H4HKKmSRMA\r\n\r\nI think just removing the `.lower()` here should fix it: https://github.com/open-telemetry/opentelemetry-python/blob/main/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L96\r\n\r\n**Steps to reproduce**\r\n```python\r\nfrom opentelemetry import baggage\r\nfrom opentelemetry.baggage.propagation import W3CBaggagePropagator\r\n\r\ncarrier = {}\r\npropagator = W3CBaggagePropagator()\r\n\r\nctx = baggage.set_baggage('userId', '1')\r\nprint(ctx)\r\n\r\npropagator.inject(carrier, ctx)\r\nprint(carrier)\r\n\r\nprint(propagator.extract(carrier))\r\n```\r\n\r\n**What is the expected behavior?**\r\n```\r\n{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}\r\n{'baggage': 'userId=1'}\r\n{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}\r\n```\r\n\r\n**What is the actual behavior?**\r\n```\r\n{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userId': '1'}}\r\n{'baggage': 'userId=1'}\r\n{'baggage-ce2af44a-cf4d-4151-88a5-349b34d2c353': {'userid': '1'}} # note lowercased key\r\n```\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom logging import getLogger\nfrom re import split\nfrom typing import Iterable, List, Mapping, Optional, Set\nfrom urllib.parse import quote_plus, unquote_plus\n\nfrom opentelemetry.baggage import _is_valid_pair, get_all, set_baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import textmap\nfrom opentelemetry.util.re import _DELIMITER_PATTERN\n\n_logger = getLogger(__name__)\n\n\nclass W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n _MAX_HEADER_LENGTH = 8192\n _MAX_PAIR_LENGTH = 4096\n _MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n carrier: textmap.CarrierT,\n context: Optional[Context] = None,\n getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n 
`opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header:\n return context\n\n if len(header) > self._MAX_HEADER_LENGTH:\n _logger.warning(\n \"Baggage header `%s` exceeded the maximum number of bytes per baggage-string\",\n header,\n )\n return context\n\n baggage_entries: List[str] = split(_DELIMITER_PATTERN, header)\n total_baggage_entries = self._MAX_PAIRS\n\n if len(baggage_entries) > self._MAX_PAIRS:\n _logger.warning(\n \"Baggage header `%s` exceeded the maximum number of list-members\",\n header,\n )\n\n for entry in baggage_entries:\n if len(entry) > self._MAX_PAIR_LENGTH:\n _logger.warning(\n \"Baggage entry `%s` exceeded the maximum number of bytes per list-member\",\n entry,\n )\n continue\n if not entry: # empty string\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n _logger.warning(\n \"Baggage list-member `%s` doesn't match the format\", entry\n )\n continue\n\n if not _is_valid_pair(name, value):\n _logger.warning(\"Invalid baggage entry: `%s`\", entry)\n continue\n\n name = unquote_plus(name).strip().lower()\n value = unquote_plus(value).strip()\n\n context = set_baggage(\n name,\n value,\n context=context,\n )\n total_baggage_entries -= 1\n if total_baggage_entries == 0:\n break\n\n return context\n\n def inject(\n self,\n carrier: textmap.CarrierT,\n context: Optional[Context] = None,\n setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: Mapping[str, object]) -> str:\n return \",\".join(\n quote_plus(str(key)) + \"=\" + quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: Optional[Iterable[textmap.CarrierT]],\n) -> Optional[textmap.CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom logging import getLogger\nfrom re import split\nfrom typing import Iterable, List, Mapping, Optional, Set\nfrom urllib.parse import quote_plus, unquote_plus\n\nfrom opentelemetry.baggage import _is_valid_pair, get_all, set_baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom 
opentelemetry.propagators import textmap\nfrom opentelemetry.util.re import _DELIMITER_PATTERN\n\n_logger = getLogger(__name__)\n\n\nclass W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n _MAX_HEADER_LENGTH = 8192\n _MAX_PAIR_LENGTH = 4096\n _MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n carrier: textmap.CarrierT,\n context: Optional[Context] = None,\n getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header:\n return context\n\n if len(header) > self._MAX_HEADER_LENGTH:\n _logger.warning(\n \"Baggage header `%s` exceeded the maximum number of bytes per baggage-string\",\n header,\n )\n return context\n\n baggage_entries: List[str] = split(_DELIMITER_PATTERN, header)\n total_baggage_entries = self._MAX_PAIRS\n\n if len(baggage_entries) > self._MAX_PAIRS:\n _logger.warning(\n \"Baggage header `%s` exceeded the maximum number of list-members\",\n header,\n )\n\n for entry in baggage_entries:\n if len(entry) > self._MAX_PAIR_LENGTH:\n _logger.warning(\n \"Baggage entry `%s` exceeded the maximum number of bytes per list-member\",\n entry,\n )\n continue\n if not entry: # empty string\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n _logger.warning(\n \"Baggage list-member `%s` doesn't match the format\", entry\n )\n continue\n\n if not _is_valid_pair(name, value):\n _logger.warning(\"Invalid baggage entry: `%s`\", entry)\n continue\n\n name = unquote_plus(name).strip()\n value = unquote_plus(value).strip()\n\n context = set_baggage(\n name,\n value,\n context=context,\n )\n total_baggage_entries -= 1\n if total_baggage_entries == 0:\n break\n\n return context\n\n def inject(\n self,\n carrier: textmap.CarrierT,\n context: Optional[Context] = None,\n setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: Mapping[str, object]) -> str:\n return \",\".join(\n quote_plus(str(key)) + \"=\" + quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: Optional[Iterable[textmap.CarrierT]],\n) -> Optional[textmap.CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}]} | 2,123 | 156 |
gh_patches_debug_6476 | rasdani/github-patches | git_diff | deepset-ai__haystack-7249 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API Docs - `haystack.dataclasses`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/dataclasses/document.py`
Content:
```
1 import hashlib
2 import io
3 import logging
4 from dataclasses import asdict, dataclass, field, fields
5 from typing import Any, Dict, List, Optional
6
7 from numpy import ndarray
8 from pandas import DataFrame, read_json
9
10 from haystack.dataclasses.byte_stream import ByteStream
11
12 logger = logging.getLogger(__name__)
13
14
15 class _BackwardCompatible(type):
16 """
17 Metaclass that handles Document backward compatibility.
18 """
19
20 def __call__(cls, *args, **kwargs):
21 """
22 Called before Document.__init__, will remap legacy fields to new ones.
23 Also handles building a Document from a flattened dictionary.
24 """
25 # Move `content` to new fields depending on the type
26 content = kwargs.get("content")
27 if isinstance(content, DataFrame):
28 kwargs["dataframe"] = content
29 del kwargs["content"]
30
31 # Not used anymore
32 if "content_type" in kwargs:
33 del kwargs["content_type"]
34
35 # Embedding were stored as NumPy arrays in 1.x, so we convert it to the new type
36 if isinstance(embedding := kwargs.get("embedding"), ndarray):
37 kwargs["embedding"] = embedding.tolist()
38
39 # id_hash_keys is not used anymore
40 if "id_hash_keys" in kwargs:
41 del kwargs["id_hash_keys"]
42
43 return super().__call__(*args, **kwargs)
44
45
46 @dataclass
47 class Document(metaclass=_BackwardCompatible):
48 """
49 Base data class containing some data to be queried.
50
51 Can contain text snippets, tables, and file paths to images or audios. Documents can be sorted by score and saved
52 to/from dictionary and JSON.
53
54 :param id: Unique identifier for the document. When not set, it's generated based on the Document fields' values.
55 :param content: Text of the document, if the document contains text.
56 :param dataframe: Pandas dataframe with the document's content, if the document contains tabular data.
57 :param blob: Binary data associated with the document, if the document has any binary data associated with it.
58 :param meta: Additional custom metadata for the document. Must be JSON-serializable.
59 :param score: Score of the document. Used for ranking, usually assigned by retrievers.
60 :param embedding: Vector representation of the document.
61 """
62
63 id: str = field(default="")
64 content: Optional[str] = field(default=None)
65 dataframe: Optional[DataFrame] = field(default=None)
66 blob: Optional[ByteStream] = field(default=None)
67 meta: Dict[str, Any] = field(default_factory=dict)
68 score: Optional[float] = field(default=None)
69 embedding: Optional[List[float]] = field(default=None)
70
71 def __repr__(self):
72 fields = []
73 if self.content is not None:
74 fields.append(
75 f"content: '{self.content}'" if len(self.content) < 100 else f"content: '{self.content[:100]}...'"
76 )
77 if self.dataframe is not None:
78 fields.append(f"dataframe: {self.dataframe.shape}")
79 if self.blob is not None:
80 fields.append(f"blob: {len(self.blob.data)} bytes")
81 if len(self.meta) > 0:
82 fields.append(f"meta: {self.meta}")
83 if self.score is not None:
84 fields.append(f"score: {self.score}")
85 if self.embedding is not None:
86 fields.append(f"embedding: vector of size {len(self.embedding)}")
87 fields_str = ", ".join(fields)
88 return f"{self.__class__.__name__}(id={self.id}, {fields_str})"
89
90 def __eq__(self, other):
91 """
92 Compares Documents for equality.
93
94 Two Documents are considered equals if their dictionary representation is identical.
95 """
96 if type(self) != type(other):
97 return False
98 return self.to_dict() == other.to_dict()
99
100 def __post_init__(self):
101 """
102 Generate the ID based on the init parameters.
103 """
104 # Generate an id only if not explicitly set
105 self.id = self.id or self._create_id()
106
107 def _create_id(self):
108 """
109 Creates a hash of the given content that acts as the document's ID.
110 """
111 text = self.content or None
112 dataframe = self.dataframe.to_json() if self.dataframe is not None else None
113 blob = self.blob.data if self.blob is not None else None
114 mime_type = self.blob.mime_type if self.blob is not None else None
115 meta = self.meta or {}
116 embedding = self.embedding if self.embedding is not None else None
117 data = f"{text}{dataframe}{blob}{mime_type}{meta}{embedding}"
118 return hashlib.sha256(data.encode("utf-8")).hexdigest()
119
120 def to_dict(self, flatten=True) -> Dict[str, Any]:
121 """
122 Converts Document into a dictionary.
123
124 `dataframe` and `blob` fields are converted to JSON-serializable types.
125
126 :param flatten:
127 Whether to flatten `meta` field or not. Defaults to `True` to be backward-compatible with Haystack 1.x.
128 """
129 data = asdict(self)
130 if (dataframe := data.get("dataframe")) is not None:
131 data["dataframe"] = dataframe.to_json()
132 if (blob := data.get("blob")) is not None:
133 data["blob"] = {"data": list(blob["data"]), "mime_type": blob["mime_type"]}
134
135 if flatten:
136 meta = data.pop("meta")
137 return {**data, **meta}
138
139 return data
140
141 @classmethod
142 def from_dict(cls, data: Dict[str, Any]) -> "Document":
143 """
144 Creates a new Document object from a dictionary.
145
146 NOTE: `dataframe` and `blob` fields are converted to their original types.
147 """
148 if (dataframe := data.get("dataframe")) is not None:
149 data["dataframe"] = read_json(io.StringIO(dataframe))
150 if blob := data.get("blob"):
151 data["blob"] = ByteStream(data=bytes(blob["data"]), mime_type=blob["mime_type"])
152 # Store metadata for a moment while we try un-flattening allegedly flatten metadata.
153 # We don't expect both a `meta=` keyword and flatten metadata keys so we'll raise a
154 # ValueError later if this is the case.
155 meta = data.pop("meta", {})
156 # Unflatten metadata if it was flattened. We assume any keyword argument that's not
157 # a document field is a metadata key. We treat legacy fields as document fields
158 # for backward compatibility.
159 flatten_meta = {}
160 legacy_fields = ["content_type", "id_hash_keys"]
161 document_fields = legacy_fields + [f.name for f in fields(cls)]
162 for key in list(data.keys()):
163 if key not in document_fields:
164 flatten_meta[key] = data.pop(key)
165
166 # We don't support passing both flatten keys and the `meta` keyword parameter
167 if meta and flatten_meta:
168 raise ValueError(
169 "You can pass either the 'meta' parameter or flattened metadata keys as keyword arguments, "
170 "but currently you're passing both. Pass either the 'meta' parameter or flattened metadata keys."
171 )
172
173 # Finally put back all the metadata
174 return cls(**data, meta={**meta, **flatten_meta})
175
176 @property
177 def content_type(self):
178 """
179 Returns the type of the content for the document.
180
181 This is necessary to keep backward compatibility with 1.x.
182
183 :raises ValueError:
184 If both `text` and `dataframe` fields are set or both are missing.
185 """
186 if self.content is not None and self.dataframe is not None:
187 raise ValueError("Both text and dataframe are set.")
188
189 if self.content is not None:
190 return "text"
191 elif self.dataframe is not None:
192 return "table"
193 raise ValueError("Neither text nor dataframe is set.")
194
```
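As a quick illustration of the `to_dict`/`from_dict` round trip described in the docstrings above, here is a small sketch (field values are made up; the import path is assumed from the file location shown):

```python
from haystack.dataclasses.document import Document

doc = Document(content="hello world", meta={"lang": "en"})

data = doc.to_dict()        # meta keys are flattened by default
assert data["lang"] == "en"

restored = Document.from_dict(data)   # flattened keys are re-nested under `meta`
assert restored == doc
```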
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/dataclasses/document.py b/haystack/dataclasses/document.py
--- a/haystack/dataclasses/document.py
+++ b/haystack/dataclasses/document.py
@@ -143,7 +143,7 @@
"""
Creates a new Document object from a dictionary.
- NOTE: `dataframe` and `blob` fields are converted to their original types.
+ The `dataframe` and `blob` fields are converted to their original types.
"""
if (dataframe := data.get("dataframe")) is not None:
data["dataframe"] = read_json(io.StringIO(dataframe))
| {"golden_diff": "diff --git a/haystack/dataclasses/document.py b/haystack/dataclasses/document.py\n--- a/haystack/dataclasses/document.py\n+++ b/haystack/dataclasses/document.py\n@@ -143,7 +143,7 @@\n \"\"\"\n Creates a new Document object from a dictionary.\n \n- NOTE: `dataframe` and `blob` fields are converted to their original types.\n+ The `dataframe` and `blob` fields are converted to their original types.\n \"\"\"\n if (dataframe := data.get(\"dataframe\")) is not None:\n data[\"dataframe\"] = read_json(io.StringIO(dataframe))\n", "issue": "API Docs - `haystack.dataclasses`\n\n", "before_files": [{"content": "import hashlib\nimport io\nimport logging\nfrom dataclasses import asdict, dataclass, field, fields\nfrom typing import Any, Dict, List, Optional\n\nfrom numpy import ndarray\nfrom pandas import DataFrame, read_json\n\nfrom haystack.dataclasses.byte_stream import ByteStream\n\nlogger = logging.getLogger(__name__)\n\n\nclass _BackwardCompatible(type):\n \"\"\"\n Metaclass that handles Document backward compatibility.\n \"\"\"\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Called before Document.__init__, will remap legacy fields to new ones.\n Also handles building a Document from a flattened dictionary.\n \"\"\"\n # Move `content` to new fields depending on the type\n content = kwargs.get(\"content\")\n if isinstance(content, DataFrame):\n kwargs[\"dataframe\"] = content\n del kwargs[\"content\"]\n\n # Not used anymore\n if \"content_type\" in kwargs:\n del kwargs[\"content_type\"]\n\n # Embedding were stored as NumPy arrays in 1.x, so we convert it to the new type\n if isinstance(embedding := kwargs.get(\"embedding\"), ndarray):\n kwargs[\"embedding\"] = embedding.tolist()\n\n # id_hash_keys is not used anymore\n if \"id_hash_keys\" in kwargs:\n del kwargs[\"id_hash_keys\"]\n\n return super().__call__(*args, **kwargs)\n\n\n@dataclass\nclass Document(metaclass=_BackwardCompatible):\n \"\"\"\n Base data class containing some data to be queried.\n\n Can contain text snippets, tables, and file paths to images or audios. Documents can be sorted by score and saved\n to/from dictionary and JSON.\n\n :param id: Unique identifier for the document. When not set, it's generated based on the Document fields' values.\n :param content: Text of the document, if the document contains text.\n :param dataframe: Pandas dataframe with the document's content, if the document contains tabular data.\n :param blob: Binary data associated with the document, if the document has any binary data associated with it.\n :param meta: Additional custom metadata for the document. Must be JSON-serializable.\n :param score: Score of the document. 
Used for ranking, usually assigned by retrievers.\n :param embedding: Vector representation of the document.\n \"\"\"\n\n id: str = field(default=\"\")\n content: Optional[str] = field(default=None)\n dataframe: Optional[DataFrame] = field(default=None)\n blob: Optional[ByteStream] = field(default=None)\n meta: Dict[str, Any] = field(default_factory=dict)\n score: Optional[float] = field(default=None)\n embedding: Optional[List[float]] = field(default=None)\n\n def __repr__(self):\n fields = []\n if self.content is not None:\n fields.append(\n f\"content: '{self.content}'\" if len(self.content) < 100 else f\"content: '{self.content[:100]}...'\"\n )\n if self.dataframe is not None:\n fields.append(f\"dataframe: {self.dataframe.shape}\")\n if self.blob is not None:\n fields.append(f\"blob: {len(self.blob.data)} bytes\")\n if len(self.meta) > 0:\n fields.append(f\"meta: {self.meta}\")\n if self.score is not None:\n fields.append(f\"score: {self.score}\")\n if self.embedding is not None:\n fields.append(f\"embedding: vector of size {len(self.embedding)}\")\n fields_str = \", \".join(fields)\n return f\"{self.__class__.__name__}(id={self.id}, {fields_str})\"\n\n def __eq__(self, other):\n \"\"\"\n Compares Documents for equality.\n\n Two Documents are considered equals if their dictionary representation is identical.\n \"\"\"\n if type(self) != type(other):\n return False\n return self.to_dict() == other.to_dict()\n\n def __post_init__(self):\n \"\"\"\n Generate the ID based on the init parameters.\n \"\"\"\n # Generate an id only if not explicitly set\n self.id = self.id or self._create_id()\n\n def _create_id(self):\n \"\"\"\n Creates a hash of the given content that acts as the document's ID.\n \"\"\"\n text = self.content or None\n dataframe = self.dataframe.to_json() if self.dataframe is not None else None\n blob = self.blob.data if self.blob is not None else None\n mime_type = self.blob.mime_type if self.blob is not None else None\n meta = self.meta or {}\n embedding = self.embedding if self.embedding is not None else None\n data = f\"{text}{dataframe}{blob}{mime_type}{meta}{embedding}\"\n return hashlib.sha256(data.encode(\"utf-8\")).hexdigest()\n\n def to_dict(self, flatten=True) -> Dict[str, Any]:\n \"\"\"\n Converts Document into a dictionary.\n\n `dataframe` and `blob` fields are converted to JSON-serializable types.\n\n :param flatten:\n Whether to flatten `meta` field or not. 
Defaults to `True` to be backward-compatible with Haystack 1.x.\n \"\"\"\n data = asdict(self)\n if (dataframe := data.get(\"dataframe\")) is not None:\n data[\"dataframe\"] = dataframe.to_json()\n if (blob := data.get(\"blob\")) is not None:\n data[\"blob\"] = {\"data\": list(blob[\"data\"]), \"mime_type\": blob[\"mime_type\"]}\n\n if flatten:\n meta = data.pop(\"meta\")\n return {**data, **meta}\n\n return data\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"Document\":\n \"\"\"\n Creates a new Document object from a dictionary.\n\n NOTE: `dataframe` and `blob` fields are converted to their original types.\n \"\"\"\n if (dataframe := data.get(\"dataframe\")) is not None:\n data[\"dataframe\"] = read_json(io.StringIO(dataframe))\n if blob := data.get(\"blob\"):\n data[\"blob\"] = ByteStream(data=bytes(blob[\"data\"]), mime_type=blob[\"mime_type\"])\n # Store metadata for a moment while we try un-flattening allegedly flatten metadata.\n # We don't expect both a `meta=` keyword and flatten metadata keys so we'll raise a\n # ValueError later if this is the case.\n meta = data.pop(\"meta\", {})\n # Unflatten metadata if it was flattened. We assume any keyword argument that's not\n # a document field is a metadata key. We treat legacy fields as document fields\n # for backward compatibility.\n flatten_meta = {}\n legacy_fields = [\"content_type\", \"id_hash_keys\"]\n document_fields = legacy_fields + [f.name for f in fields(cls)]\n for key in list(data.keys()):\n if key not in document_fields:\n flatten_meta[key] = data.pop(key)\n\n # We don't support passing both flatten keys and the `meta` keyword parameter\n if meta and flatten_meta:\n raise ValueError(\n \"You can pass either the 'meta' parameter or flattened metadata keys as keyword arguments, \"\n \"but currently you're passing both. 
Pass either the 'meta' parameter or flattened metadata keys.\"\n )\n\n # Finally put back all the metadata\n return cls(**data, meta={**meta, **flatten_meta})\n\n @property\n def content_type(self):\n \"\"\"\n Returns the type of the content for the document.\n\n This is necessary to keep backward compatibility with 1.x.\n\n :raises ValueError:\n If both `text` and `dataframe` fields are set or both are missing.\n \"\"\"\n if self.content is not None and self.dataframe is not None:\n raise ValueError(\"Both text and dataframe are set.\")\n\n if self.content is not None:\n return \"text\"\n elif self.dataframe is not None:\n return \"table\"\n raise ValueError(\"Neither text nor dataframe is set.\")\n", "path": "haystack/dataclasses/document.py"}], "after_files": [{"content": "import hashlib\nimport io\nimport logging\nfrom dataclasses import asdict, dataclass, field, fields\nfrom typing import Any, Dict, List, Optional\n\nfrom numpy import ndarray\nfrom pandas import DataFrame, read_json\n\nfrom haystack.dataclasses.byte_stream import ByteStream\n\nlogger = logging.getLogger(__name__)\n\n\nclass _BackwardCompatible(type):\n \"\"\"\n Metaclass that handles Document backward compatibility.\n \"\"\"\n\n def __call__(cls, *args, **kwargs):\n \"\"\"\n Called before Document.__init__, will remap legacy fields to new ones.\n Also handles building a Document from a flattened dictionary.\n \"\"\"\n # Move `content` to new fields depending on the type\n content = kwargs.get(\"content\")\n if isinstance(content, DataFrame):\n kwargs[\"dataframe\"] = content\n del kwargs[\"content\"]\n\n # Not used anymore\n if \"content_type\" in kwargs:\n del kwargs[\"content_type\"]\n\n # Embedding were stored as NumPy arrays in 1.x, so we convert it to the new type\n if isinstance(embedding := kwargs.get(\"embedding\"), ndarray):\n kwargs[\"embedding\"] = embedding.tolist()\n\n # id_hash_keys is not used anymore\n if \"id_hash_keys\" in kwargs:\n del kwargs[\"id_hash_keys\"]\n\n return super().__call__(*args, **kwargs)\n\n\n@dataclass\nclass Document(metaclass=_BackwardCompatible):\n \"\"\"\n Base data class containing some data to be queried.\n\n Can contain text snippets, tables, and file paths to images or audios. Documents can be sorted by score and saved\n to/from dictionary and JSON.\n\n :param id: Unique identifier for the document. When not set, it's generated based on the Document fields' values.\n :param content: Text of the document, if the document contains text.\n :param dataframe: Pandas dataframe with the document's content, if the document contains tabular data.\n :param blob: Binary data associated with the document, if the document has any binary data associated with it.\n :param meta: Additional custom metadata for the document. Must be JSON-serializable.\n :param score: Score of the document. 
Used for ranking, usually assigned by retrievers.\n :param embedding: Vector representation of the document.\n \"\"\"\n\n id: str = field(default=\"\")\n content: Optional[str] = field(default=None)\n dataframe: Optional[DataFrame] = field(default=None)\n blob: Optional[ByteStream] = field(default=None)\n meta: Dict[str, Any] = field(default_factory=dict)\n score: Optional[float] = field(default=None)\n embedding: Optional[List[float]] = field(default=None)\n\n def __repr__(self):\n fields = []\n if self.content is not None:\n fields.append(\n f\"content: '{self.content}'\" if len(self.content) < 100 else f\"content: '{self.content[:100]}...'\"\n )\n if self.dataframe is not None:\n fields.append(f\"dataframe: {self.dataframe.shape}\")\n if self.blob is not None:\n fields.append(f\"blob: {len(self.blob.data)} bytes\")\n if len(self.meta) > 0:\n fields.append(f\"meta: {self.meta}\")\n if self.score is not None:\n fields.append(f\"score: {self.score}\")\n if self.embedding is not None:\n fields.append(f\"embedding: vector of size {len(self.embedding)}\")\n fields_str = \", \".join(fields)\n return f\"{self.__class__.__name__}(id={self.id}, {fields_str})\"\n\n def __eq__(self, other):\n \"\"\"\n Compares Documents for equality.\n\n Two Documents are considered equals if their dictionary representation is identical.\n \"\"\"\n if type(self) != type(other):\n return False\n return self.to_dict() == other.to_dict()\n\n def __post_init__(self):\n \"\"\"\n Generate the ID based on the init parameters.\n \"\"\"\n # Generate an id only if not explicitly set\n self.id = self.id or self._create_id()\n\n def _create_id(self):\n \"\"\"\n Creates a hash of the given content that acts as the document's ID.\n \"\"\"\n text = self.content or None\n dataframe = self.dataframe.to_json() if self.dataframe is not None else None\n blob = self.blob.data if self.blob is not None else None\n mime_type = self.blob.mime_type if self.blob is not None else None\n meta = self.meta or {}\n embedding = self.embedding if self.embedding is not None else None\n data = f\"{text}{dataframe}{blob}{mime_type}{meta}{embedding}\"\n return hashlib.sha256(data.encode(\"utf-8\")).hexdigest()\n\n def to_dict(self, flatten=True) -> Dict[str, Any]:\n \"\"\"\n Converts Document into a dictionary.\n\n `dataframe` and `blob` fields are converted to JSON-serializable types.\n\n :param flatten:\n Whether to flatten `meta` field or not. 
Defaults to `True` to be backward-compatible with Haystack 1.x.\n \"\"\"\n data = asdict(self)\n if (dataframe := data.get(\"dataframe\")) is not None:\n data[\"dataframe\"] = dataframe.to_json()\n if (blob := data.get(\"blob\")) is not None:\n data[\"blob\"] = {\"data\": list(blob[\"data\"]), \"mime_type\": blob[\"mime_type\"]}\n\n if flatten:\n meta = data.pop(\"meta\")\n return {**data, **meta}\n\n return data\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"Document\":\n \"\"\"\n Creates a new Document object from a dictionary.\n\n The `dataframe` and `blob` fields are converted to their original types.\n \"\"\"\n if (dataframe := data.get(\"dataframe\")) is not None:\n data[\"dataframe\"] = read_json(io.StringIO(dataframe))\n if blob := data.get(\"blob\"):\n data[\"blob\"] = ByteStream(data=bytes(blob[\"data\"]), mime_type=blob[\"mime_type\"])\n # Store metadata for a moment while we try un-flattening allegedly flatten metadata.\n # We don't expect both a `meta=` keyword and flatten metadata keys so we'll raise a\n # ValueError later if this is the case.\n meta = data.pop(\"meta\", {})\n # Unflatten metadata if it was flattened. We assume any keyword argument that's not\n # a document field is a metadata key. We treat legacy fields as document fields\n # for backward compatibility.\n flatten_meta = {}\n legacy_fields = [\"content_type\", \"id_hash_keys\"]\n document_fields = legacy_fields + [f.name for f in fields(cls)]\n for key in list(data.keys()):\n if key not in document_fields:\n flatten_meta[key] = data.pop(key)\n\n # We don't support passing both flatten keys and the `meta` keyword parameter\n if meta and flatten_meta:\n raise ValueError(\n \"You can pass either the 'meta' parameter or flattened metadata keys as keyword arguments, \"\n \"but currently you're passing both. Pass either the 'meta' parameter or flattened metadata keys.\"\n )\n\n # Finally put back all the metadata\n return cls(**data, meta={**meta, **flatten_meta})\n\n @property\n def content_type(self):\n \"\"\"\n Returns the type of the content for the document.\n\n This is necessary to keep backward compatibility with 1.x.\n\n :raises ValueError:\n If both `text` and `dataframe` fields are set or both are missing.\n \"\"\"\n if self.content is not None and self.dataframe is not None:\n raise ValueError(\"Both text and dataframe are set.\")\n\n if self.content is not None:\n return \"text\"\n elif self.dataframe is not None:\n return \"table\"\n raise ValueError(\"Neither text nor dataframe is set.\")\n", "path": "haystack/dataclasses/document.py"}]} | 2,451 | 139 |
gh_patches_debug_8719 | rasdani/github-patches | git_diff | streamlit__streamlit-4202 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow click > 8
Hello,
I'm using streamlit in an environment where other packages depend on click > 8.0.
I see that there is a mention that it won't work; however, from my testing it does work.
https://github.com/streamlit/streamlit/blob/develop/lib/Pipfile#L45
Can someone pinpoint what APIs they think are incompatible?
--- END ISSUE ---
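For context, the incompatibility is narrower than the Pipfile pin suggests. A minimal sketch (assuming click >= 8 is installed) of the behaviour involved: click 8 detects `python -m <module>` invocations and changes the displayed program name, so passing an explicit `prog_name` keeps the usage string stable. The group below is a toy stand-in, not streamlit's real CLI.

```python
# Minimal sketch, not streamlit's actual CLI: shows that passing prog_name
# explicitly keeps the usage string stable under click >= 8, which otherwise
# may render the program as "python -m <module>".
import click

@click.group()
def main():
    """Toy stand-in for the `main` group defined in lib/streamlit/cli.py."""

@main.command("hello")
def hello():
    click.echo("hello from the sketch")

if __name__ == "__main__":
    main(prog_name="streamlit")  # the same explicit prog_name the eventual fix uses
```

Separately, `click.get_os_args()` (used in `_get_command_line_as_string` below) is deprecated in click 8 in favour of `sys.argv[1:]`, so that call is another candidate answer to the question above.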
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/__main__.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16 from streamlit.cli import main
17
18 if __name__ == "__main__":
19 # So that the Streamlit server sees the same command line string
20 # whether streamlit is called directly or via `python -m streamlit`.
21 sys.argv[0] = "streamlit"
22
23 main()
24
```
Path: `lib/streamlit/cli.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """This is a script which is run when the Streamlit package is executed."""
16
17 from streamlit import config as _config
18
19 import os
20 from typing import Optional
21
22 import click
23
24 import streamlit
25 from streamlit.credentials import Credentials, check_credentials
26 import streamlit.bootstrap as bootstrap
27 from streamlit.case_converters import to_snake_case
28
29 ACCEPTED_FILE_EXTENSIONS = ("py", "py3")
30
31 LOG_LEVELS = ("error", "warning", "info", "debug")
32
33
34 def _convert_config_option_to_click_option(config_option):
35 """Composes given config option options as options for click lib."""
36 option = "--{}".format(config_option.key)
37 param = config_option.key.replace(".", "_")
38 description = config_option.description
39 if config_option.deprecated:
40 description += "\n {} - {}".format(
41 config_option.deprecation_text, config_option.expiration_date
42 )
43 envvar = "STREAMLIT_{}".format(to_snake_case(param).upper())
44
45 return {
46 "param": param,
47 "description": description,
48 "type": config_option.type,
49 "option": option,
50 "envvar": envvar,
51 }
52
53
54 def configurator_options(func):
55 """Decorator that adds config param keys to click dynamically."""
56 for _, value in reversed(_config._config_options_template.items()):
57 parsed_parameter = _convert_config_option_to_click_option(value)
58 config_option = click.option(
59 parsed_parameter["option"],
60 parsed_parameter["param"],
61 help=parsed_parameter["description"],
62 type=parsed_parameter["type"],
63 show_envvar=True,
64 envvar=parsed_parameter["envvar"],
65 )
66 func = config_option(func)
67 return func
68
69
70 # Fetch remote file at url_path to script_path
71 def _download_remote(script_path, url_path):
72 import requests
73
74 with open(script_path, "wb") as fp:
75 try:
76 resp = requests.get(url_path)
77 resp.raise_for_status()
78 fp.write(resp.content)
79 except requests.exceptions.RequestException as e:
80 raise click.BadParameter(("Unable to fetch {}.\n{}".format(url_path, e)))
81
82
83 @click.group(context_settings={"auto_envvar_prefix": "STREAMLIT"})
84 @click.option("--log_level", show_default=True, type=click.Choice(LOG_LEVELS))
85 @click.version_option(prog_name="Streamlit")
86 @click.pass_context
87 def main(ctx, log_level="info"):
88 """Try out a demo with:
89
90 $ streamlit hello
91
92 Or use the line below to run your own script:
93
94 $ streamlit run your_script.py
95 """
96
97 if log_level:
98 import streamlit.logger
99
100 streamlit.logger.set_log_level(log_level.upper())
101
102
103 @main.command("help")
104 @click.pass_context
105 def help(ctx):
106 """Print this help message."""
107 # Pretend user typed 'streamlit --help' instead of 'streamlit help'.
108 import sys
109
110 assert len(sys.argv) == 2 # This is always true, but let's assert anyway.
111 sys.argv[1] = "--help"
112 main()
113
114
115 @main.command("version")
116 @click.pass_context
117 def main_version(ctx):
118 """Print Streamlit's version number."""
119 # Pretend user typed 'streamlit --version' instead of 'streamlit version'
120 import sys
121
122 assert len(sys.argv) == 2 # This is always true, but let's assert anyway.
123 sys.argv[1] = "--version"
124 main()
125
126
127 @main.command("docs")
128 def main_docs():
129 """Show help in browser."""
130 print("Showing help page in browser...")
131 from streamlit import util
132
133 util.open_browser("https://docs.streamlit.io")
134
135
136 @main.command("hello")
137 @configurator_options
138 def main_hello(**kwargs):
139 """Runs the Hello World script."""
140 from streamlit.hello import hello
141
142 bootstrap.load_config_options(flag_options=kwargs)
143 filename = hello.__file__
144 _main_run(filename, flag_options=kwargs)
145
146
147 @main.command("run")
148 @configurator_options
149 @click.argument("target", required=True, envvar="STREAMLIT_RUN_TARGET")
150 @click.argument("args", nargs=-1)
151 def main_run(target, args=None, **kwargs):
152 """Run a Python script, piping stderr to Streamlit.
153
154 The script can be local or it can be an url. In the latter case, Streamlit
155 will download the script to a temporary file and runs this file.
156
157 """
158 from validators import url
159
160 bootstrap.load_config_options(flag_options=kwargs)
161
162 _, extension = os.path.splitext(target)
163 if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:
164 if extension[1:] == "":
165 raise click.BadArgumentUsage(
166 "Streamlit requires raw Python (.py) files, but the provided file has no extension.\nFor more information, please see https://docs.streamlit.io"
167 )
168 else:
169 raise click.BadArgumentUsage(
170 "Streamlit requires raw Python (.py) files, not %s.\nFor more information, please see https://docs.streamlit.io"
171 % extension
172 )
173
174 if url(target):
175 from streamlit.temporary_directory import TemporaryDirectory
176
177 with TemporaryDirectory() as temp_dir:
178 from urllib.parse import urlparse
179 from streamlit import url_util
180
181 path = urlparse(target).path
182 script_path = os.path.join(temp_dir, path.strip("/").rsplit("/", 1)[-1])
183 # if this is a GitHub/Gist blob url, convert to a raw URL first.
184 target = url_util.process_gitblob_url(target)
185 _download_remote(script_path, target)
186 _main_run(script_path, args, flag_options=kwargs)
187 else:
188 if not os.path.exists(target):
189 raise click.BadParameter("File does not exist: {}".format(target))
190 _main_run(target, args, flag_options=kwargs)
191
192
193 def _get_command_line_as_string() -> Optional[str]:
194 import subprocess
195
196 parent = click.get_current_context().parent
197 if parent is None:
198 return None
199 cmd_line_as_list = [parent.command_path]
200 cmd_line_as_list.extend(click.get_os_args())
201 return subprocess.list2cmdline(cmd_line_as_list)
202
203
204 def _main_run(file, args=None, flag_options=None):
205 if args is None:
206 args = []
207
208 if flag_options is None:
209 flag_options = {}
210
211 command_line = _get_command_line_as_string()
212
213 # Set a global flag indicating that we're "within" streamlit.
214 streamlit._is_running_with_streamlit = True
215
216 check_credentials()
217
218 bootstrap.run(file, command_line, args, flag_options)
219
220
221 # SUBCOMMAND: cache
222
223
224 @main.group("cache")
225 def cache():
226 """Manage the Streamlit cache."""
227 pass
228
229
230 @cache.command("clear")
231 def cache_clear():
232 """Clear st.cache, st.memo, and st.singleton caches."""
233 import streamlit.legacy_caching
234 import streamlit.caching
235
236 result = streamlit.legacy_caching.clear_cache()
237 cache_path = streamlit.legacy_caching.get_cache_path()
238 if result:
239 print("Cleared directory %s." % cache_path)
240 else:
241 print("Nothing to clear at %s." % cache_path)
242
243 streamlit.caching.clear_memo_cache()
244 streamlit.caching.clear_singleton_cache()
245
246
247 # SUBCOMMAND: config
248
249
250 @main.group("config")
251 def config():
252 """Manage Streamlit's config settings."""
253 pass
254
255
256 @config.command("show")
257 @configurator_options
258 def config_show(**kwargs):
259 """Show all of Streamlit's config settings."""
260
261 bootstrap.load_config_options(flag_options=kwargs)
262
263 _config.show_config()
264
265
266 # SUBCOMMAND: activate
267
268
269 @main.group("activate", invoke_without_command=True)
270 @click.pass_context
271 def activate(ctx):
272 """Activate Streamlit by entering your email."""
273 if not ctx.invoked_subcommand:
274 Credentials.get_current().activate()
275
276
277 @activate.command("reset")
278 def activate_reset():
279 """Reset Activation Credentials."""
280 Credentials.get_current().reset()
281
282
283 if __name__ == "__main__":
284 main()
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/__main__.py b/lib/streamlit/__main__.py
--- a/lib/streamlit/__main__.py
+++ b/lib/streamlit/__main__.py
@@ -20,4 +20,4 @@
# whether streamlit is called directly or via `python -m streamlit`.
sys.argv[0] = "streamlit"
- main()
+ main(prog_name="streamlit")
diff --git a/lib/streamlit/cli.py b/lib/streamlit/cli.py
--- a/lib/streamlit/cli.py
+++ b/lib/streamlit/cli.py
@@ -109,7 +109,7 @@
assert len(sys.argv) == 2 # This is always true, but let's assert anyway.
sys.argv[1] = "--help"
- main()
+ main(prog_name="streamlit")
@main.command("version")
| {"golden_diff": "diff --git a/lib/streamlit/__main__.py b/lib/streamlit/__main__.py\n--- a/lib/streamlit/__main__.py\n+++ b/lib/streamlit/__main__.py\n@@ -20,4 +20,4 @@\n # whether streamlit is called directly or via `python -m streamlit`.\n sys.argv[0] = \"streamlit\"\n \n- main()\n+ main(prog_name=\"streamlit\")\ndiff --git a/lib/streamlit/cli.py b/lib/streamlit/cli.py\n--- a/lib/streamlit/cli.py\n+++ b/lib/streamlit/cli.py\n@@ -109,7 +109,7 @@\n \n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--help\"\n- main()\n+ main(prog_name=\"streamlit\")\n \n \n @main.command(\"version\")\n", "issue": "allow click > 8\nHello,\r\n\r\nI'm using streamlit into an environment where other packages depend on click > 8.0.\r\nI see that there is a mention that it won't work, however from my testing it does work.\r\nhttps://github.com/streamlit/streamlit/blob/develop/lib/Pipfile#L45\r\n\r\nCan someone pinpoint what APIs they think are incompatible?\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom streamlit.cli import main\n\nif __name__ == \"__main__\":\n # So that the Streamlit server sees the same command line string\n # whether streamlit is called directly or via `python -m streamlit`.\n sys.argv[0] = \"streamlit\"\n\n main()\n", "path": "lib/streamlit/__main__.py"}, {"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a script which is run when the Streamlit package is executed.\"\"\"\n\nfrom streamlit import config as _config\n\nimport os\nfrom typing import Optional\n\nimport click\n\nimport streamlit\nfrom streamlit.credentials import Credentials, check_credentials\nimport streamlit.bootstrap as bootstrap\nfrom streamlit.case_converters import to_snake_case\n\nACCEPTED_FILE_EXTENSIONS = (\"py\", \"py3\")\n\nLOG_LEVELS = (\"error\", \"warning\", \"info\", \"debug\")\n\n\ndef _convert_config_option_to_click_option(config_option):\n \"\"\"Composes given config option options as options for click lib.\"\"\"\n option = \"--{}\".format(config_option.key)\n param = config_option.key.replace(\".\", \"_\")\n description = config_option.description\n if config_option.deprecated:\n description += \"\\n {} - {}\".format(\n config_option.deprecation_text, config_option.expiration_date\n )\n envvar = \"STREAMLIT_{}\".format(to_snake_case(param).upper())\n\n return {\n \"param\": param,\n \"description\": 
description,\n \"type\": config_option.type,\n \"option\": option,\n \"envvar\": envvar,\n }\n\n\ndef configurator_options(func):\n \"\"\"Decorator that adds config param keys to click dynamically.\"\"\"\n for _, value in reversed(_config._config_options_template.items()):\n parsed_parameter = _convert_config_option_to_click_option(value)\n config_option = click.option(\n parsed_parameter[\"option\"],\n parsed_parameter[\"param\"],\n help=parsed_parameter[\"description\"],\n type=parsed_parameter[\"type\"],\n show_envvar=True,\n envvar=parsed_parameter[\"envvar\"],\n )\n func = config_option(func)\n return func\n\n\n# Fetch remote file at url_path to script_path\ndef _download_remote(script_path, url_path):\n import requests\n\n with open(script_path, \"wb\") as fp:\n try:\n resp = requests.get(url_path)\n resp.raise_for_status()\n fp.write(resp.content)\n except requests.exceptions.RequestException as e:\n raise click.BadParameter((\"Unable to fetch {}.\\n{}\".format(url_path, e)))\n\n\[email protected](context_settings={\"auto_envvar_prefix\": \"STREAMLIT\"})\[email protected](\"--log_level\", show_default=True, type=click.Choice(LOG_LEVELS))\[email protected]_option(prog_name=\"Streamlit\")\[email protected]_context\ndef main(ctx, log_level=\"info\"):\n \"\"\"Try out a demo with:\n\n $ streamlit hello\n\n Or use the line below to run your own script:\n\n $ streamlit run your_script.py\n \"\"\"\n\n if log_level:\n import streamlit.logger\n\n streamlit.logger.set_log_level(log_level.upper())\n\n\[email protected](\"help\")\[email protected]_context\ndef help(ctx):\n \"\"\"Print this help message.\"\"\"\n # Pretend user typed 'streamlit --help' instead of 'streamlit help'.\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--help\"\n main()\n\n\[email protected](\"version\")\[email protected]_context\ndef main_version(ctx):\n \"\"\"Print Streamlit's version number.\"\"\"\n # Pretend user typed 'streamlit --version' instead of 'streamlit version'\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--version\"\n main()\n\n\[email protected](\"docs\")\ndef main_docs():\n \"\"\"Show help in browser.\"\"\"\n print(\"Showing help page in browser...\")\n from streamlit import util\n\n util.open_browser(\"https://docs.streamlit.io\")\n\n\[email protected](\"hello\")\n@configurator_options\ndef main_hello(**kwargs):\n \"\"\"Runs the Hello World script.\"\"\"\n from streamlit.hello import hello\n\n bootstrap.load_config_options(flag_options=kwargs)\n filename = hello.__file__\n _main_run(filename, flag_options=kwargs)\n\n\[email protected](\"run\")\n@configurator_options\[email protected](\"target\", required=True, envvar=\"STREAMLIT_RUN_TARGET\")\[email protected](\"args\", nargs=-1)\ndef main_run(target, args=None, **kwargs):\n \"\"\"Run a Python script, piping stderr to Streamlit.\n\n The script can be local or it can be an url. 
In the latter case, Streamlit\n will download the script to a temporary file and runs this file.\n\n \"\"\"\n from validators import url\n\n bootstrap.load_config_options(flag_options=kwargs)\n\n _, extension = os.path.splitext(target)\n if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:\n if extension[1:] == \"\":\n raise click.BadArgumentUsage(\n \"Streamlit requires raw Python (.py) files, but the provided file has no extension.\\nFor more information, please see https://docs.streamlit.io\"\n )\n else:\n raise click.BadArgumentUsage(\n \"Streamlit requires raw Python (.py) files, not %s.\\nFor more information, please see https://docs.streamlit.io\"\n % extension\n )\n\n if url(target):\n from streamlit.temporary_directory import TemporaryDirectory\n\n with TemporaryDirectory() as temp_dir:\n from urllib.parse import urlparse\n from streamlit import url_util\n\n path = urlparse(target).path\n script_path = os.path.join(temp_dir, path.strip(\"/\").rsplit(\"/\", 1)[-1])\n # if this is a GitHub/Gist blob url, convert to a raw URL first.\n target = url_util.process_gitblob_url(target)\n _download_remote(script_path, target)\n _main_run(script_path, args, flag_options=kwargs)\n else:\n if not os.path.exists(target):\n raise click.BadParameter(\"File does not exist: {}\".format(target))\n _main_run(target, args, flag_options=kwargs)\n\n\ndef _get_command_line_as_string() -> Optional[str]:\n import subprocess\n\n parent = click.get_current_context().parent\n if parent is None:\n return None\n cmd_line_as_list = [parent.command_path]\n cmd_line_as_list.extend(click.get_os_args())\n return subprocess.list2cmdline(cmd_line_as_list)\n\n\ndef _main_run(file, args=None, flag_options=None):\n if args is None:\n args = []\n\n if flag_options is None:\n flag_options = {}\n\n command_line = _get_command_line_as_string()\n\n # Set a global flag indicating that we're \"within\" streamlit.\n streamlit._is_running_with_streamlit = True\n\n check_credentials()\n\n bootstrap.run(file, command_line, args, flag_options)\n\n\n# SUBCOMMAND: cache\n\n\[email protected](\"cache\")\ndef cache():\n \"\"\"Manage the Streamlit cache.\"\"\"\n pass\n\n\[email protected](\"clear\")\ndef cache_clear():\n \"\"\"Clear st.cache, st.memo, and st.singleton caches.\"\"\"\n import streamlit.legacy_caching\n import streamlit.caching\n\n result = streamlit.legacy_caching.clear_cache()\n cache_path = streamlit.legacy_caching.get_cache_path()\n if result:\n print(\"Cleared directory %s.\" % cache_path)\n else:\n print(\"Nothing to clear at %s.\" % cache_path)\n\n streamlit.caching.clear_memo_cache()\n streamlit.caching.clear_singleton_cache()\n\n\n# SUBCOMMAND: config\n\n\[email protected](\"config\")\ndef config():\n \"\"\"Manage Streamlit's config settings.\"\"\"\n pass\n\n\[email protected](\"show\")\n@configurator_options\ndef config_show(**kwargs):\n \"\"\"Show all of Streamlit's config settings.\"\"\"\n\n bootstrap.load_config_options(flag_options=kwargs)\n\n _config.show_config()\n\n\n# SUBCOMMAND: activate\n\n\[email protected](\"activate\", invoke_without_command=True)\[email protected]_context\ndef activate(ctx):\n \"\"\"Activate Streamlit by entering your email.\"\"\"\n if not ctx.invoked_subcommand:\n Credentials.get_current().activate()\n\n\[email protected](\"reset\")\ndef activate_reset():\n \"\"\"Reset Activation Credentials.\"\"\"\n Credentials.get_current().reset()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/streamlit/cli.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# 
Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom streamlit.cli import main\n\nif __name__ == \"__main__\":\n # So that the Streamlit server sees the same command line string\n # whether streamlit is called directly or via `python -m streamlit`.\n sys.argv[0] = \"streamlit\"\n\n main(prog_name=\"streamlit\")\n", "path": "lib/streamlit/__main__.py"}, {"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This is a script which is run when the Streamlit package is executed.\"\"\"\n\nfrom streamlit import config as _config\n\nimport os\nfrom typing import Optional\n\nimport click\n\nimport streamlit\nfrom streamlit.credentials import Credentials, check_credentials\nimport streamlit.bootstrap as bootstrap\nfrom streamlit.case_converters import to_snake_case\n\nACCEPTED_FILE_EXTENSIONS = (\"py\", \"py3\")\n\nLOG_LEVELS = (\"error\", \"warning\", \"info\", \"debug\")\n\n\ndef _convert_config_option_to_click_option(config_option):\n \"\"\"Composes given config option options as options for click lib.\"\"\"\n option = \"--{}\".format(config_option.key)\n param = config_option.key.replace(\".\", \"_\")\n description = config_option.description\n if config_option.deprecated:\n description += \"\\n {} - {}\".format(\n config_option.deprecation_text, config_option.expiration_date\n )\n envvar = \"STREAMLIT_{}\".format(to_snake_case(param).upper())\n\n return {\n \"param\": param,\n \"description\": description,\n \"type\": config_option.type,\n \"option\": option,\n \"envvar\": envvar,\n }\n\n\ndef configurator_options(func):\n \"\"\"Decorator that adds config param keys to click dynamically.\"\"\"\n for _, value in reversed(_config._config_options_template.items()):\n parsed_parameter = _convert_config_option_to_click_option(value)\n config_option = click.option(\n parsed_parameter[\"option\"],\n parsed_parameter[\"param\"],\n help=parsed_parameter[\"description\"],\n type=parsed_parameter[\"type\"],\n show_envvar=True,\n envvar=parsed_parameter[\"envvar\"],\n )\n func = config_option(func)\n return func\n\n\n# Fetch remote file at url_path to script_path\ndef _download_remote(script_path, url_path):\n import requests\n\n with open(script_path, \"wb\") as fp:\n try:\n resp = requests.get(url_path)\n resp.raise_for_status()\n fp.write(resp.content)\n except requests.exceptions.RequestException as e:\n raise click.BadParameter((\"Unable to fetch {}.\\n{}\".format(url_path, e)))\n\n\[email 
protected](context_settings={\"auto_envvar_prefix\": \"STREAMLIT\"})\[email protected](\"--log_level\", show_default=True, type=click.Choice(LOG_LEVELS))\[email protected]_option(prog_name=\"Streamlit\")\[email protected]_context\ndef main(ctx, log_level=\"info\"):\n \"\"\"Try out a demo with:\n\n $ streamlit hello\n\n Or use the line below to run your own script:\n\n $ streamlit run your_script.py\n \"\"\"\n\n if log_level:\n import streamlit.logger\n\n streamlit.logger.set_log_level(log_level.upper())\n\n\[email protected](\"help\")\[email protected]_context\ndef help(ctx):\n \"\"\"Print this help message.\"\"\"\n # Pretend user typed 'streamlit --help' instead of 'streamlit help'.\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--help\"\n main(prog_name=\"streamlit\")\n\n\[email protected](\"version\")\[email protected]_context\ndef main_version(ctx):\n \"\"\"Print Streamlit's version number.\"\"\"\n # Pretend user typed 'streamlit --version' instead of 'streamlit version'\n import sys\n\n assert len(sys.argv) == 2 # This is always true, but let's assert anyway.\n sys.argv[1] = \"--version\"\n main()\n\n\[email protected](\"docs\")\ndef main_docs():\n \"\"\"Show help in browser.\"\"\"\n print(\"Showing help page in browser...\")\n from streamlit import util\n\n util.open_browser(\"https://docs.streamlit.io\")\n\n\[email protected](\"hello\")\n@configurator_options\ndef main_hello(**kwargs):\n \"\"\"Runs the Hello World script.\"\"\"\n from streamlit.hello import hello\n\n bootstrap.load_config_options(flag_options=kwargs)\n filename = hello.__file__\n _main_run(filename, flag_options=kwargs)\n\n\[email protected](\"run\")\n@configurator_options\[email protected](\"target\", required=True, envvar=\"STREAMLIT_RUN_TARGET\")\[email protected](\"args\", nargs=-1)\ndef main_run(target, args=None, **kwargs):\n \"\"\"Run a Python script, piping stderr to Streamlit.\n\n The script can be local or it can be an url. 
In the latter case, Streamlit\n will download the script to a temporary file and runs this file.\n\n \"\"\"\n from validators import url\n\n bootstrap.load_config_options(flag_options=kwargs)\n\n _, extension = os.path.splitext(target)\n if extension[1:] not in ACCEPTED_FILE_EXTENSIONS:\n if extension[1:] == \"\":\n raise click.BadArgumentUsage(\n \"Streamlit requires raw Python (.py) files, but the provided file has no extension.\\nFor more information, please see https://docs.streamlit.io\"\n )\n else:\n raise click.BadArgumentUsage(\n \"Streamlit requires raw Python (.py) files, not %s.\\nFor more information, please see https://docs.streamlit.io\"\n % extension\n )\n\n if url(target):\n from streamlit.temporary_directory import TemporaryDirectory\n\n with TemporaryDirectory() as temp_dir:\n from urllib.parse import urlparse\n from streamlit import url_util\n\n path = urlparse(target).path\n script_path = os.path.join(temp_dir, path.strip(\"/\").rsplit(\"/\", 1)[-1])\n # if this is a GitHub/Gist blob url, convert to a raw URL first.\n target = url_util.process_gitblob_url(target)\n _download_remote(script_path, target)\n _main_run(script_path, args, flag_options=kwargs)\n else:\n if not os.path.exists(target):\n raise click.BadParameter(\"File does not exist: {}\".format(target))\n _main_run(target, args, flag_options=kwargs)\n\n\ndef _get_command_line_as_string() -> Optional[str]:\n import subprocess\n\n parent = click.get_current_context().parent\n if parent is None:\n return None\n cmd_line_as_list = [parent.command_path]\n cmd_line_as_list.extend(click.get_os_args())\n return subprocess.list2cmdline(cmd_line_as_list)\n\n\ndef _main_run(file, args=None, flag_options=None):\n if args is None:\n args = []\n\n if flag_options is None:\n flag_options = {}\n\n command_line = _get_command_line_as_string()\n\n # Set a global flag indicating that we're \"within\" streamlit.\n streamlit._is_running_with_streamlit = True\n\n check_credentials()\n\n bootstrap.run(file, command_line, args, flag_options)\n\n\n# SUBCOMMAND: cache\n\n\[email protected](\"cache\")\ndef cache():\n \"\"\"Manage the Streamlit cache.\"\"\"\n pass\n\n\[email protected](\"clear\")\ndef cache_clear():\n \"\"\"Clear st.cache, st.memo, and st.singleton caches.\"\"\"\n import streamlit.legacy_caching\n import streamlit.caching\n\n result = streamlit.legacy_caching.clear_cache()\n cache_path = streamlit.legacy_caching.get_cache_path()\n if result:\n print(\"Cleared directory %s.\" % cache_path)\n else:\n print(\"Nothing to clear at %s.\" % cache_path)\n\n streamlit.caching.clear_memo_cache()\n streamlit.caching.clear_singleton_cache()\n\n\n# SUBCOMMAND: config\n\n\[email protected](\"config\")\ndef config():\n \"\"\"Manage Streamlit's config settings.\"\"\"\n pass\n\n\[email protected](\"show\")\n@configurator_options\ndef config_show(**kwargs):\n \"\"\"Show all of Streamlit's config settings.\"\"\"\n\n bootstrap.load_config_options(flag_options=kwargs)\n\n _config.show_config()\n\n\n# SUBCOMMAND: activate\n\n\[email protected](\"activate\", invoke_without_command=True)\[email protected]_context\ndef activate(ctx):\n \"\"\"Activate Streamlit by entering your email.\"\"\"\n if not ctx.invoked_subcommand:\n Credentials.get_current().activate()\n\n\[email protected](\"reset\")\ndef activate_reset():\n \"\"\"Reset Activation Credentials.\"\"\"\n Credentials.get_current().reset()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "lib/streamlit/cli.py"}]} | 3,243 | 193 |
gh_patches_debug_34447 | rasdani/github-patches | git_diff | fonttools__fonttools-1361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ignore length of the last 'kern' subtable
I noticed this warning with ttx'ing Open Sans Bold; the other styles in Open Sans don't have this issue:
``` sh
$ git clone https://github.com/google/fonts.git ;
$ ttx fonts/apache/opensans/OpenSans-Bold.ttf ;
Dumping "OpenSans-Bold.ttf" to "OpenSans-Bold.ttx"...
Dumping 'GlyphOrder' table...
Dumping 'head' table...
Dumping 'hhea' table...
Dumping 'maxp' table...
Dumping 'OS/2' table...
Dumping 'hmtx' table...
Dumping 'cmap' table...
Dumping 'fpgm' table...
Dumping 'prep' table...
Dumping 'cvt ' table...
Dumping 'loca' table...
Dumping 'glyf' table...
/Library/Python/2.7/site-packages/fonttools-2.5-py2.7.egg/fontTools/ttLib/tables/_k_e_r_n.py:121: UserWarning: excess data in 'kern' subtable: 46628 bytes
Dumping 'kern' table...
Dumping 'name' table...
Dumping 'post' table...
Dumping 'gasp' table...
Dumping 'GDEF' table...
Dumping 'GPOS' table...
Dumping 'GSUB' table...
Dumping 'DSIG' table...
$ ttx OpenSans-Bold.ttx ;
$ ls -l OpenSans-Bold.ttf OpenSans-Bold#1.ttf ;
-rw-r--r-- 1 user group 158,720 Jul 1 20:53 OpenSans-Bold#1.ttf
-rw-r--r-- 1 user group 224,592 Jan 15 16:28 OpenSans-Bold.ttf
```
Since the data doesn't round trip, I guess it's a bug in Open Sans, but before reporting this to the developer, I thought I'd file an issue to better understand this error :)
--- END ISSUE ---
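The mismatch has a concrete cause: a format-0 'kern' subtable stores its byte length in a 16-bit header field, so a subtable with enough kerning pairs overflows it and the stored length wraps around, leaving the declared length out of step with the actual pair data — which is what fontTools flags. A small sketch with hypothetical pair counts (not taken from the font itself):

```python
# Hypothetical numbers, not read from OpenSans-Bold.ttf: illustrates how a
# format-0 'kern' subtable's true size can exceed what the uint16 length
# field in its header can represent.
import struct

def format0_subtable_length(n_pairs: int) -> int:
    # 14 header bytes (version/length/format/coverage + nPairs/searchRange/
    # entrySelector/rangeShift) plus 6 bytes per (left, right, value) pair.
    return n_pairs * 6 + 14

n_pairs = 12_000                                  # plausible for a kerning-heavy font
true_length = format0_subtable_length(n_pairs)    # 72014
stored_length = true_length & 0xFFFF              # 6478 after wrapping
header_field = struct.pack(">H", stored_length)   # all the 16-bit field can hold

print(true_length, stored_length)
```

Recomputing the subtable length from nPairs instead of trusting the header field is what lets such a table round trip.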
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/ttLib/tables/_k_e_r_n.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2 from fontTools.misc.py23 import *
3 from fontTools.ttLib import getSearchRange
4 from fontTools.misc.textTools import safeEval, readHex
5 from fontTools.misc.fixedTools import (
6 fixedToFloat as fi2fl,
7 floatToFixed as fl2fi)
8 from . import DefaultTable
9 import struct
10 import sys
11 import array
12 import logging
13
14
15 log = logging.getLogger(__name__)
16
17
18 class table__k_e_r_n(DefaultTable.DefaultTable):
19
20 def getkern(self, format):
21 for subtable in self.kernTables:
22 if subtable.format == format:
23 return subtable
24 return None # not found
25
26 def decompile(self, data, ttFont):
27 version, nTables = struct.unpack(">HH", data[:4])
28 apple = False
29 if (len(data) >= 8) and (version == 1):
30 # AAT Apple's "new" format. Hm.
31 version, nTables = struct.unpack(">LL", data[:8])
32 self.version = fi2fl(version, 16)
33 data = data[8:]
34 apple = True
35 else:
36 self.version = version
37 data = data[4:]
38 self.kernTables = []
39 for i in range(nTables):
40 if self.version == 1.0:
41 # Apple
42 length, coverage, subtableFormat = struct.unpack(
43 ">LBB", data[:6])
44 else:
45 # in OpenType spec the "version" field refers to the common
46 # subtable header; the actual subtable format is stored in
47 # the 8-15 mask bits of "coverage" field.
48 # This "version" is always 0 so we ignore it here
49 _, length, subtableFormat, coverage = struct.unpack(
50 ">HHBB", data[:6])
51 if subtableFormat not in kern_classes:
52 subtable = KernTable_format_unkown(subtableFormat)
53 else:
54 subtable = kern_classes[subtableFormat](apple)
55 subtable.decompile(data[:length], ttFont)
56 self.kernTables.append(subtable)
57 data = data[length:]
58
59 def compile(self, ttFont):
60 if hasattr(self, "kernTables"):
61 nTables = len(self.kernTables)
62 else:
63 nTables = 0
64 if self.version == 1.0:
65 # AAT Apple's "new" format.
66 data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
67 else:
68 data = struct.pack(">HH", self.version, nTables)
69 if hasattr(self, "kernTables"):
70 for subtable in self.kernTables:
71 data = data + subtable.compile(ttFont)
72 return data
73
74 def toXML(self, writer, ttFont):
75 writer.simpletag("version", value=self.version)
76 writer.newline()
77 for subtable in self.kernTables:
78 subtable.toXML(writer, ttFont)
79
80 def fromXML(self, name, attrs, content, ttFont):
81 if name == "version":
82 self.version = safeEval(attrs["value"])
83 return
84 if name != "kernsubtable":
85 return
86 if not hasattr(self, "kernTables"):
87 self.kernTables = []
88 format = safeEval(attrs["format"])
89 if format not in kern_classes:
90 subtable = KernTable_format_unkown(format)
91 else:
92 apple = self.version == 1.0
93 subtable = kern_classes[format](apple)
94 self.kernTables.append(subtable)
95 subtable.fromXML(name, attrs, content, ttFont)
96
97
98 class KernTable_format_0(object):
99
100 # 'version' is kept for backward compatibility
101 version = format = 0
102
103 def __init__(self, apple=False):
104 self.apple = apple
105
106 def decompile(self, data, ttFont):
107 if not self.apple:
108 version, length, subtableFormat, coverage = struct.unpack(
109 ">HHBB", data[:6])
110 if version != 0:
111 from fontTools.ttLib import TTLibError
112 raise TTLibError(
113 "unsupported kern subtable version: %d" % version)
114 tupleIndex = None
115 # Should we also assert length == len(data)?
116 data = data[6:]
117 else:
118 length, coverage, subtableFormat, tupleIndex = struct.unpack(
119 ">LBBH", data[:8])
120 data = data[8:]
121 assert self.format == subtableFormat, "unsupported format"
122 self.coverage = coverage
123 self.tupleIndex = tupleIndex
124
125 self.kernTable = kernTable = {}
126
127 nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
128 ">HHHH", data[:8])
129 data = data[8:]
130
131 nPairs = min(nPairs, len(data) // 6)
132 datas = array.array("H", data[:6 * nPairs])
133 if sys.byteorder != "big": datas.byteswap()
134 it = iter(datas)
135 glyphOrder = ttFont.getGlyphOrder()
136 for k in range(nPairs):
137 left, right, value = next(it), next(it), next(it)
138 if value >= 32768:
139 value -= 65536
140 try:
141 kernTable[(glyphOrder[left], glyphOrder[right])] = value
142 except IndexError:
143 # Slower, but will not throw an IndexError on an invalid
144 # glyph id.
145 kernTable[(
146 ttFont.getGlyphName(left),
147 ttFont.getGlyphName(right))] = value
148 if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess
149 log.warning(
150 "excess data in 'kern' subtable: %d bytes",
151 len(data) - 6 * nPairs)
152
153 def compile(self, ttFont):
154 nPairs = len(self.kernTable)
155 searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
156 data = struct.pack(
157 ">HHHH", nPairs, searchRange, entrySelector, rangeShift)
158
159 # yeehee! (I mean, turn names into indices)
160 try:
161 reverseOrder = ttFont.getReverseGlyphMap()
162 kernTable = sorted(
163 (reverseOrder[left], reverseOrder[right], value)
164 for ((left, right), value) in self.kernTable.items())
165 except KeyError:
166 # Slower, but will not throw KeyError on invalid glyph id.
167 getGlyphID = ttFont.getGlyphID
168 kernTable = sorted(
169 (getGlyphID(left), getGlyphID(right), value)
170 for ((left, right), value) in self.kernTable.items())
171
172 for left, right, value in kernTable:
173 data = data + struct.pack(">HHh", left, right, value)
174
175 if not self.apple:
176 version = 0
177 length = len(data) + 6
178 header = struct.pack(
179 ">HHBB", version, length, self.format, self.coverage)
180 else:
181 if self.tupleIndex is None:
182 # sensible default when compiling a TTX from an old fonttools
183 # or when inserting a Windows-style format 0 subtable into an
184 # Apple version=1.0 kern table
185 log.warning("'tupleIndex' is None; default to 0")
186 self.tupleIndex = 0
187 length = len(data) + 8
188 header = struct.pack(
189 ">LBBH", length, self.coverage, self.format, self.tupleIndex)
190 return header + data
191
192 def toXML(self, writer, ttFont):
193 attrs = dict(coverage=self.coverage, format=self.format)
194 if self.apple:
195 if self.tupleIndex is None:
196 log.warning("'tupleIndex' is None; default to 0")
197 attrs["tupleIndex"] = 0
198 else:
199 attrs["tupleIndex"] = self.tupleIndex
200 writer.begintag("kernsubtable", **attrs)
201 writer.newline()
202 items = sorted(self.kernTable.items())
203 for (left, right), value in items:
204 writer.simpletag("pair", [
205 ("l", left),
206 ("r", right),
207 ("v", value)
208 ])
209 writer.newline()
210 writer.endtag("kernsubtable")
211 writer.newline()
212
213 def fromXML(self, name, attrs, content, ttFont):
214 self.coverage = safeEval(attrs["coverage"])
215 subtableFormat = safeEval(attrs["format"])
216 if self.apple:
217 if "tupleIndex" in attrs:
218 self.tupleIndex = safeEval(attrs["tupleIndex"])
219 else:
220 # previous fontTools versions didn't export tupleIndex
221 log.warning(
222 "Apple kern subtable is missing 'tupleIndex' attribute")
223 self.tupleIndex = None
224 else:
225 self.tupleIndex = None
226 assert subtableFormat == self.format, "unsupported format"
227 if not hasattr(self, "kernTable"):
228 self.kernTable = {}
229 for element in content:
230 if not isinstance(element, tuple):
231 continue
232 name, attrs, content = element
233 self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
234
235 def __getitem__(self, pair):
236 return self.kernTable[pair]
237
238 def __setitem__(self, pair, value):
239 self.kernTable[pair] = value
240
241 def __delitem__(self, pair):
242 del self.kernTable[pair]
243
244
245 class KernTable_format_unkown(object):
246
247 def __init__(self, format):
248 self.format = format
249
250 def decompile(self, data, ttFont):
251 self.data = data
252
253 def compile(self, ttFont):
254 return self.data
255
256 def toXML(self, writer, ttFont):
257 writer.begintag("kernsubtable", format=self.format)
258 writer.newline()
259 writer.comment("unknown 'kern' subtable format")
260 writer.newline()
261 writer.dumphex(self.data)
262 writer.endtag("kernsubtable")
263 writer.newline()
264
265 def fromXML(self, name, attrs, content, ttFont):
266 self.decompile(readHex(content), ttFont)
267
268
269 kern_classes = {0: KernTable_format_0}
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Lib/fontTools/ttLib/tables/_k_e_r_n.py b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
--- a/Lib/fontTools/ttLib/tables/_k_e_r_n.py
+++ b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
@@ -48,6 +48,19 @@
# This "version" is always 0 so we ignore it here
_, length, subtableFormat, coverage = struct.unpack(
">HHBB", data[:6])
+ if nTables == 1 and subtableFormat == 0:
+ # The "length" value is ignored since some fonts
+ # (like OpenSans and Calibri) have a subtable larger than
+ # its value.
+ nPairs, = struct.unpack(">H", data[6:8])
+ calculated_length = (nPairs * 6) + 14
+ if length != calculated_length:
+ log.warning(
+ "'kern' subtable longer than defined: "
+ "%d bytes instead of %d bytes" %
+ (calculated_length, length)
+ )
+ length = calculated_length
if subtableFormat not in kern_classes:
subtable = KernTable_format_unkown(subtableFormat)
else:
@@ -128,7 +141,6 @@
">HHHH", data[:8])
data = data[8:]
- nPairs = min(nPairs, len(data) // 6)
datas = array.array("H", data[:6 * nPairs])
if sys.byteorder != "big": datas.byteswap()
it = iter(datas)
@@ -153,6 +165,7 @@
def compile(self, ttFont):
nPairs = len(self.kernTable)
searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
+ searchRange &= 0xFFFF
data = struct.pack(
">HHHH", nPairs, searchRange, entrySelector, rangeShift)
@@ -175,6 +188,10 @@
if not self.apple:
version = 0
length = len(data) + 6
+ if length >= 0x10000:
+ log.warning('"kern" subtable overflow, '
+ 'truncating length value while preserving pairs.')
+ length &= 0xFFFF
header = struct.pack(
">HHBB", version, length, self.format, self.coverage)
else:
| {"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_k_e_r_n.py b/Lib/fontTools/ttLib/tables/_k_e_r_n.py\n--- a/Lib/fontTools/ttLib/tables/_k_e_r_n.py\n+++ b/Lib/fontTools/ttLib/tables/_k_e_r_n.py\n@@ -48,6 +48,19 @@\n \t\t\t\t# This \"version\" is always 0 so we ignore it here\n \t\t\t\t_, length, subtableFormat, coverage = struct.unpack(\n \t\t\t\t\t\">HHBB\", data[:6])\n+\t\t\t\tif nTables == 1 and subtableFormat == 0:\n+\t\t\t\t\t# The \"length\" value is ignored since some fonts\n+\t\t\t\t\t# (like OpenSans and Calibri) have a subtable larger than\n+\t\t\t\t\t# its value.\n+\t\t\t\t\tnPairs, = struct.unpack(\">H\", data[6:8])\n+\t\t\t\t\tcalculated_length = (nPairs * 6) + 14\n+\t\t\t\t\tif length != calculated_length:\n+\t\t\t\t\t\tlog.warning(\n+\t\t\t\t\t\t\t\"'kern' subtable longer than defined: \"\n+\t\t\t\t\t\t\t\"%d bytes instead of %d bytes\" %\n+\t\t\t\t\t\t\t(calculated_length, length)\n+\t\t\t\t\t\t)\n+\t\t\t\t\tlength = calculated_length\n \t\t\tif subtableFormat not in kern_classes:\n \t\t\t\tsubtable = KernTable_format_unkown(subtableFormat)\n \t\t\telse:\n@@ -128,7 +141,6 @@\n \t\t\t\">HHHH\", data[:8])\n \t\tdata = data[8:]\n \n-\t\tnPairs = min(nPairs, len(data) // 6)\n \t\tdatas = array.array(\"H\", data[:6 * nPairs])\n \t\tif sys.byteorder != \"big\": datas.byteswap()\n \t\tit = iter(datas)\n@@ -153,6 +165,7 @@\n \tdef compile(self, ttFont):\n \t\tnPairs = len(self.kernTable)\n \t\tsearchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)\n+\t\tsearchRange &= 0xFFFF\n \t\tdata = struct.pack(\n \t\t\t\">HHHH\", nPairs, searchRange, entrySelector, rangeShift)\n \n@@ -175,6 +188,10 @@\n \t\tif not self.apple:\n \t\t\tversion = 0\n \t\t\tlength = len(data) + 6\n+\t\t\tif length >= 0x10000:\n+\t\t\t\tlog.warning('\"kern\" subtable overflow, '\n+\t\t\t\t\t\t\t'truncating length value while preserving pairs.')\n+\t\t\t\tlength &= 0xFFFF\n \t\t\theader = struct.pack(\n \t\t\t\t\">HHBB\", version, length, self.format, self.coverage)\n \t\telse:\n", "issue": "ignore length of the last 'kern' subtable\"\nI noticed this warning with ttx'ing Open Sans Bold; the other styles in Open Sans don't have this issue:\n\n``` sh\n$ git clone https://github.com/google/fonts.git ; \n$ ttx fonts/apache/opensans/OpenSans-Bold.ttf ;\nDumping \"OpenSans-Bold.ttf\" to \"OpenSans-Bold.ttx\"...\nDumping 'GlyphOrder' table...\nDumping 'head' table...\nDumping 'hhea' table...\nDumping 'maxp' table...\nDumping 'OS/2' table...\nDumping 'hmtx' table...\nDumping 'cmap' table...\nDumping 'fpgm' table...\nDumping 'prep' table...\nDumping 'cvt ' table...\nDumping 'loca' table...\nDumping 'glyf' table...\n/Library/Python/2.7/site-packages/fonttools-2.5-py2.7.egg/fontTools/ttLib/tables/_k_e_r_n.py:121: UserWarning: excess data in 'kern' subtable: 46628 bytes\nDumping 'kern' table...\nDumping 'name' table...\nDumping 'post' table...\nDumping 'gasp' table...\nDumping 'GDEF' table...\nDumping 'GPOS' table...\nDumping 'GSUB' table...\nDumping 'DSIG' table...\n$ ttx OpenSans-Bold.ttx ;\n$ ls -l OpenSans-Bold.ttf OpenSans-Bold#1.ttf ;\n-rw-r--r-- 1 user group 158,720 Jul 1 20:53 OpenSans-Bold#1.ttf\n-rw-r--r-- 1 user group 224,592 Jan 15 16:28 OpenSans-Bold.ttf\n```\n\nSince the data doesn't round trip, I guess its a bug in Open Sans, but before reporting this to the developer, I thought I'd file an issue to better understand this error :)\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.ttLib 
import getSearchRange\nfrom fontTools.misc.textTools import safeEval, readHex\nfrom fontTools.misc.fixedTools import (\n\tfixedToFloat as fi2fl,\n\tfloatToFixed as fl2fi)\nfrom . import DefaultTable\nimport struct\nimport sys\nimport array\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\nclass table__k_e_r_n(DefaultTable.DefaultTable):\n\n\tdef getkern(self, format):\n\t\tfor subtable in self.kernTables:\n\t\t\tif subtable.format == format:\n\t\t\t\treturn subtable\n\t\treturn None # not found\n\n\tdef decompile(self, data, ttFont):\n\t\tversion, nTables = struct.unpack(\">HH\", data[:4])\n\t\tapple = False\n\t\tif (len(data) >= 8) and (version == 1):\n\t\t\t# AAT Apple's \"new\" format. Hm.\n\t\t\tversion, nTables = struct.unpack(\">LL\", data[:8])\n\t\t\tself.version = fi2fl(version, 16)\n\t\t\tdata = data[8:]\n\t\t\tapple = True\n\t\telse:\n\t\t\tself.version = version\n\t\t\tdata = data[4:]\n\t\tself.kernTables = []\n\t\tfor i in range(nTables):\n\t\t\tif self.version == 1.0:\n\t\t\t\t# Apple\n\t\t\t\tlength, coverage, subtableFormat = struct.unpack(\n\t\t\t\t\t\">LBB\", data[:6])\n\t\t\telse:\n\t\t\t\t# in OpenType spec the \"version\" field refers to the common\n\t\t\t\t# subtable header; the actual subtable format is stored in\n\t\t\t\t# the 8-15 mask bits of \"coverage\" field.\n\t\t\t\t# This \"version\" is always 0 so we ignore it here\n\t\t\t\t_, length, subtableFormat, coverage = struct.unpack(\n\t\t\t\t\t\">HHBB\", data[:6])\n\t\t\tif subtableFormat not in kern_classes:\n\t\t\t\tsubtable = KernTable_format_unkown(subtableFormat)\n\t\t\telse:\n\t\t\t\tsubtable = kern_classes[subtableFormat](apple)\n\t\t\tsubtable.decompile(data[:length], ttFont)\n\t\t\tself.kernTables.append(subtable)\n\t\t\tdata = data[length:]\n\n\tdef compile(self, ttFont):\n\t\tif hasattr(self, \"kernTables\"):\n\t\t\tnTables = len(self.kernTables)\n\t\telse:\n\t\t\tnTables = 0\n\t\tif self.version == 1.0:\n\t\t\t# AAT Apple's \"new\" format.\n\t\t\tdata = struct.pack(\">LL\", fl2fi(self.version, 16), nTables)\n\t\telse:\n\t\t\tdata = struct.pack(\">HH\", self.version, nTables)\n\t\tif hasattr(self, \"kernTables\"):\n\t\t\tfor subtable in self.kernTables:\n\t\t\t\tdata = data + subtable.compile(ttFont)\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.simpletag(\"version\", value=self.version)\n\t\twriter.newline()\n\t\tfor subtable in self.kernTables:\n\t\t\tsubtable.toXML(writer, ttFont)\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"version\":\n\t\t\tself.version = safeEval(attrs[\"value\"])\n\t\t\treturn\n\t\tif name != \"kernsubtable\":\n\t\t\treturn\n\t\tif not hasattr(self, \"kernTables\"):\n\t\t\tself.kernTables = []\n\t\tformat = safeEval(attrs[\"format\"])\n\t\tif format not in kern_classes:\n\t\t\tsubtable = KernTable_format_unkown(format)\n\t\telse:\n\t\t\tapple = self.version == 1.0\n\t\t\tsubtable = kern_classes[format](apple)\n\t\tself.kernTables.append(subtable)\n\t\tsubtable.fromXML(name, attrs, content, ttFont)\n\n\nclass KernTable_format_0(object):\n\n\t# 'version' is kept for backward compatibility\n\tversion = format = 0\n\n\tdef __init__(self, apple=False):\n\t\tself.apple = apple\n\n\tdef decompile(self, data, ttFont):\n\t\tif not self.apple:\n\t\t\tversion, length, subtableFormat, coverage = struct.unpack(\n\t\t\t\t\">HHBB\", data[:6])\n\t\t\tif version != 0:\n\t\t\t\tfrom fontTools.ttLib import TTLibError\n\t\t\t\traise TTLibError(\n\t\t\t\t\t\"unsupported kern subtable version: %d\" % version)\n\t\t\ttupleIndex = None\n\t\t\t# 
Should we also assert length == len(data)?\n\t\t\tdata = data[6:]\n\t\telse:\n\t\t\tlength, coverage, subtableFormat, tupleIndex = struct.unpack(\n\t\t\t\t\">LBBH\", data[:8])\n\t\t\tdata = data[8:]\n\t\tassert self.format == subtableFormat, \"unsupported format\"\n\t\tself.coverage = coverage\n\t\tself.tupleIndex = tupleIndex\n\n\t\tself.kernTable = kernTable = {}\n\n\t\tnPairs, searchRange, entrySelector, rangeShift = struct.unpack(\n\t\t\t\">HHHH\", data[:8])\n\t\tdata = data[8:]\n\n\t\tnPairs = min(nPairs, len(data) // 6)\n\t\tdatas = array.array(\"H\", data[:6 * nPairs])\n\t\tif sys.byteorder != \"big\": datas.byteswap()\n\t\tit = iter(datas)\n\t\tglyphOrder = ttFont.getGlyphOrder()\n\t\tfor k in range(nPairs):\n\t\t\tleft, right, value = next(it), next(it), next(it)\n\t\t\tif value >= 32768:\n\t\t\t\tvalue -= 65536\n\t\t\ttry:\n\t\t\t\tkernTable[(glyphOrder[left], glyphOrder[right])] = value\n\t\t\texcept IndexError:\n\t\t\t\t# Slower, but will not throw an IndexError on an invalid\n\t\t\t\t# glyph id.\n\t\t\t\tkernTable[(\n\t\t\t\t\tttFont.getGlyphName(left),\n\t\t\t\t\tttFont.getGlyphName(right))] = value\n\t\tif len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess\n\t\t\tlog.warning(\n\t\t\t\t\"excess data in 'kern' subtable: %d bytes\",\n\t\t\t\tlen(data) - 6 * nPairs)\n\n\tdef compile(self, ttFont):\n\t\tnPairs = len(self.kernTable)\n\t\tsearchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)\n\t\tdata = struct.pack(\n\t\t\t\">HHHH\", nPairs, searchRange, entrySelector, rangeShift)\n\n\t\t# yeehee! (I mean, turn names into indices)\n\t\ttry:\n\t\t\treverseOrder = ttFont.getReverseGlyphMap()\n\t\t\tkernTable = sorted(\n\t\t\t\t(reverseOrder[left], reverseOrder[right], value)\n\t\t\t\tfor ((left, right), value) in self.kernTable.items())\n\t\texcept KeyError:\n\t\t\t# Slower, but will not throw KeyError on invalid glyph id.\n\t\t\tgetGlyphID = ttFont.getGlyphID\n\t\t\tkernTable = sorted(\n\t\t\t\t(getGlyphID(left), getGlyphID(right), value)\n\t\t\t\tfor ((left, right), value) in self.kernTable.items())\n\n\t\tfor left, right, value in kernTable:\n\t\t\tdata = data + struct.pack(\">HHh\", left, right, value)\n\n\t\tif not self.apple:\n\t\t\tversion = 0\n\t\t\tlength = len(data) + 6\n\t\t\theader = struct.pack(\n\t\t\t\t\">HHBB\", version, length, self.format, self.coverage)\n\t\telse:\n\t\t\tif self.tupleIndex is None:\n\t\t\t\t# sensible default when compiling a TTX from an old fonttools\n\t\t\t\t# or when inserting a Windows-style format 0 subtable into an\n\t\t\t\t# Apple version=1.0 kern table\n\t\t\t\tlog.warning(\"'tupleIndex' is None; default to 0\")\n\t\t\t\tself.tupleIndex = 0\n\t\t\tlength = len(data) + 8\n\t\t\theader = struct.pack(\n\t\t\t\t\">LBBH\", length, self.coverage, self.format, self.tupleIndex)\n\t\treturn header + data\n\n\tdef toXML(self, writer, ttFont):\n\t\tattrs = dict(coverage=self.coverage, format=self.format)\n\t\tif self.apple:\n\t\t\tif self.tupleIndex is None:\n\t\t\t\tlog.warning(\"'tupleIndex' is None; default to 0\")\n\t\t\t\tattrs[\"tupleIndex\"] = 0\n\t\t\telse:\n\t\t\t\tattrs[\"tupleIndex\"] = self.tupleIndex\n\t\twriter.begintag(\"kernsubtable\", **attrs)\n\t\twriter.newline()\n\t\titems = sorted(self.kernTable.items())\n\t\tfor (left, right), value in items:\n\t\t\twriter.simpletag(\"pair\", [\n\t\t\t\t(\"l\", left),\n\t\t\t\t(\"r\", right),\n\t\t\t\t(\"v\", value)\n\t\t\t])\n\t\t\twriter.newline()\n\t\twriter.endtag(\"kernsubtable\")\n\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, 
ttFont):\n\t\tself.coverage = safeEval(attrs[\"coverage\"])\n\t\tsubtableFormat = safeEval(attrs[\"format\"])\n\t\tif self.apple:\n\t\t\tif \"tupleIndex\" in attrs:\n\t\t\t\tself.tupleIndex = safeEval(attrs[\"tupleIndex\"])\n\t\t\telse:\n\t\t\t\t# previous fontTools versions didn't export tupleIndex\n\t\t\t\tlog.warning(\n\t\t\t\t\t\"Apple kern subtable is missing 'tupleIndex' attribute\")\n\t\t\t\tself.tupleIndex = None\n\t\telse:\n\t\t\tself.tupleIndex = None\n\t\tassert subtableFormat == self.format, \"unsupported format\"\n\t\tif not hasattr(self, \"kernTable\"):\n\t\t\tself.kernTable = {}\n\t\tfor element in content:\n\t\t\tif not isinstance(element, tuple):\n\t\t\t\tcontinue\n\t\t\tname, attrs, content = element\n\t\t\tself.kernTable[(attrs[\"l\"], attrs[\"r\"])] = safeEval(attrs[\"v\"])\n\n\tdef __getitem__(self, pair):\n\t\treturn self.kernTable[pair]\n\n\tdef __setitem__(self, pair, value):\n\t\tself.kernTable[pair] = value\n\n\tdef __delitem__(self, pair):\n\t\tdel self.kernTable[pair]\n\n\nclass KernTable_format_unkown(object):\n\n\tdef __init__(self, format):\n\t\tself.format = format\n\n\tdef decompile(self, data, ttFont):\n\t\tself.data = data\n\n\tdef compile(self, ttFont):\n\t\treturn self.data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.begintag(\"kernsubtable\", format=self.format)\n\t\twriter.newline()\n\t\twriter.comment(\"unknown 'kern' subtable format\")\n\t\twriter.newline()\n\t\twriter.dumphex(self.data)\n\t\twriter.endtag(\"kernsubtable\")\n\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tself.decompile(readHex(content), ttFont)\n\n\nkern_classes = {0: KernTable_format_0}\n", "path": "Lib/fontTools/ttLib/tables/_k_e_r_n.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.ttLib import getSearchRange\nfrom fontTools.misc.textTools import safeEval, readHex\nfrom fontTools.misc.fixedTools import (\n\tfixedToFloat as fi2fl,\n\tfloatToFixed as fl2fi)\nfrom . import DefaultTable\nimport struct\nimport sys\nimport array\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\nclass table__k_e_r_n(DefaultTable.DefaultTable):\n\n\tdef getkern(self, format):\n\t\tfor subtable in self.kernTables:\n\t\t\tif subtable.format == format:\n\t\t\t\treturn subtable\n\t\treturn None # not found\n\n\tdef decompile(self, data, ttFont):\n\t\tversion, nTables = struct.unpack(\">HH\", data[:4])\n\t\tapple = False\n\t\tif (len(data) >= 8) and (version == 1):\n\t\t\t# AAT Apple's \"new\" format. 
Hm.\n\t\t\tversion, nTables = struct.unpack(\">LL\", data[:8])\n\t\t\tself.version = fi2fl(version, 16)\n\t\t\tdata = data[8:]\n\t\t\tapple = True\n\t\telse:\n\t\t\tself.version = version\n\t\t\tdata = data[4:]\n\t\tself.kernTables = []\n\t\tfor i in range(nTables):\n\t\t\tif self.version == 1.0:\n\t\t\t\t# Apple\n\t\t\t\tlength, coverage, subtableFormat = struct.unpack(\n\t\t\t\t\t\">LBB\", data[:6])\n\t\t\telse:\n\t\t\t\t# in OpenType spec the \"version\" field refers to the common\n\t\t\t\t# subtable header; the actual subtable format is stored in\n\t\t\t\t# the 8-15 mask bits of \"coverage\" field.\n\t\t\t\t# This \"version\" is always 0 so we ignore it here\n\t\t\t\t_, length, subtableFormat, coverage = struct.unpack(\n\t\t\t\t\t\">HHBB\", data[:6])\n\t\t\t\tif nTables == 1 and subtableFormat == 0:\n\t\t\t\t\t# The \"length\" value is ignored since some fonts\n\t\t\t\t\t# (like OpenSans and Calibri) have a subtable larger than\n\t\t\t\t\t# its value.\n\t\t\t\t\tnPairs, = struct.unpack(\">H\", data[6:8])\n\t\t\t\t\tcalculated_length = (nPairs * 6) + 14\n\t\t\t\t\tif length != calculated_length:\n\t\t\t\t\t\tlog.warning(\n\t\t\t\t\t\t\t\"'kern' subtable longer than defined: \"\n\t\t\t\t\t\t\t\"%d bytes instead of %d bytes\" %\n\t\t\t\t\t\t\t(calculated_length, length)\n\t\t\t\t\t\t)\n\t\t\t\t\tlength = calculated_length\n\t\t\tif subtableFormat not in kern_classes:\n\t\t\t\tsubtable = KernTable_format_unkown(subtableFormat)\n\t\t\telse:\n\t\t\t\tsubtable = kern_classes[subtableFormat](apple)\n\t\t\tsubtable.decompile(data[:length], ttFont)\n\t\t\tself.kernTables.append(subtable)\n\t\t\tdata = data[length:]\n\n\tdef compile(self, ttFont):\n\t\tif hasattr(self, \"kernTables\"):\n\t\t\tnTables = len(self.kernTables)\n\t\telse:\n\t\t\tnTables = 0\n\t\tif self.version == 1.0:\n\t\t\t# AAT Apple's \"new\" format.\n\t\t\tdata = struct.pack(\">LL\", fl2fi(self.version, 16), nTables)\n\t\telse:\n\t\t\tdata = struct.pack(\">HH\", self.version, nTables)\n\t\tif hasattr(self, \"kernTables\"):\n\t\t\tfor subtable in self.kernTables:\n\t\t\t\tdata = data + subtable.compile(ttFont)\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.simpletag(\"version\", value=self.version)\n\t\twriter.newline()\n\t\tfor subtable in self.kernTables:\n\t\t\tsubtable.toXML(writer, ttFont)\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif name == \"version\":\n\t\t\tself.version = safeEval(attrs[\"value\"])\n\t\t\treturn\n\t\tif name != \"kernsubtable\":\n\t\t\treturn\n\t\tif not hasattr(self, \"kernTables\"):\n\t\t\tself.kernTables = []\n\t\tformat = safeEval(attrs[\"format\"])\n\t\tif format not in kern_classes:\n\t\t\tsubtable = KernTable_format_unkown(format)\n\t\telse:\n\t\t\tapple = self.version == 1.0\n\t\t\tsubtable = kern_classes[format](apple)\n\t\tself.kernTables.append(subtable)\n\t\tsubtable.fromXML(name, attrs, content, ttFont)\n\n\nclass KernTable_format_0(object):\n\n\t# 'version' is kept for backward compatibility\n\tversion = format = 0\n\n\tdef __init__(self, apple=False):\n\t\tself.apple = apple\n\n\tdef decompile(self, data, ttFont):\n\t\tif not self.apple:\n\t\t\tversion, length, subtableFormat, coverage = struct.unpack(\n\t\t\t\t\">HHBB\", data[:6])\n\t\t\tif version != 0:\n\t\t\t\tfrom fontTools.ttLib import TTLibError\n\t\t\t\traise TTLibError(\n\t\t\t\t\t\"unsupported kern subtable version: %d\" % version)\n\t\t\ttupleIndex = None\n\t\t\t# Should we also assert length == len(data)?\n\t\t\tdata = data[6:]\n\t\telse:\n\t\t\tlength, coverage, subtableFormat, tupleIndex = 
struct.unpack(\n\t\t\t\t\">LBBH\", data[:8])\n\t\t\tdata = data[8:]\n\t\tassert self.format == subtableFormat, \"unsupported format\"\n\t\tself.coverage = coverage\n\t\tself.tupleIndex = tupleIndex\n\n\t\tself.kernTable = kernTable = {}\n\n\t\tnPairs, searchRange, entrySelector, rangeShift = struct.unpack(\n\t\t\t\">HHHH\", data[:8])\n\t\tdata = data[8:]\n\n\t\tdatas = array.array(\"H\", data[:6 * nPairs])\n\t\tif sys.byteorder != \"big\": datas.byteswap()\n\t\tit = iter(datas)\n\t\tglyphOrder = ttFont.getGlyphOrder()\n\t\tfor k in range(nPairs):\n\t\t\tleft, right, value = next(it), next(it), next(it)\n\t\t\tif value >= 32768:\n\t\t\t\tvalue -= 65536\n\t\t\ttry:\n\t\t\t\tkernTable[(glyphOrder[left], glyphOrder[right])] = value\n\t\t\texcept IndexError:\n\t\t\t\t# Slower, but will not throw an IndexError on an invalid\n\t\t\t\t# glyph id.\n\t\t\t\tkernTable[(\n\t\t\t\t\tttFont.getGlyphName(left),\n\t\t\t\t\tttFont.getGlyphName(right))] = value\n\t\tif len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess\n\t\t\tlog.warning(\n\t\t\t\t\"excess data in 'kern' subtable: %d bytes\",\n\t\t\t\tlen(data) - 6 * nPairs)\n\n\tdef compile(self, ttFont):\n\t\tnPairs = len(self.kernTable)\n\t\tsearchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)\n\t\tsearchRange &= 0xFFFF\n\t\tdata = struct.pack(\n\t\t\t\">HHHH\", nPairs, searchRange, entrySelector, rangeShift)\n\n\t\t# yeehee! (I mean, turn names into indices)\n\t\ttry:\n\t\t\treverseOrder = ttFont.getReverseGlyphMap()\n\t\t\tkernTable = sorted(\n\t\t\t\t(reverseOrder[left], reverseOrder[right], value)\n\t\t\t\tfor ((left, right), value) in self.kernTable.items())\n\t\texcept KeyError:\n\t\t\t# Slower, but will not throw KeyError on invalid glyph id.\n\t\t\tgetGlyphID = ttFont.getGlyphID\n\t\t\tkernTable = sorted(\n\t\t\t\t(getGlyphID(left), getGlyphID(right), value)\n\t\t\t\tfor ((left, right), value) in self.kernTable.items())\n\n\t\tfor left, right, value in kernTable:\n\t\t\tdata = data + struct.pack(\">HHh\", left, right, value)\n\n\t\tif not self.apple:\n\t\t\tversion = 0\n\t\t\tlength = len(data) + 6\n\t\t\tif length >= 0x10000:\n\t\t\t\tlog.warning('\"kern\" subtable overflow, '\n\t\t\t\t\t\t\t'truncating length value while preserving pairs.')\n\t\t\t\tlength &= 0xFFFF\n\t\t\theader = struct.pack(\n\t\t\t\t\">HHBB\", version, length, self.format, self.coverage)\n\t\telse:\n\t\t\tif self.tupleIndex is None:\n\t\t\t\t# sensible default when compiling a TTX from an old fonttools\n\t\t\t\t# or when inserting a Windows-style format 0 subtable into an\n\t\t\t\t# Apple version=1.0 kern table\n\t\t\t\tlog.warning(\"'tupleIndex' is None; default to 0\")\n\t\t\t\tself.tupleIndex = 0\n\t\t\tlength = len(data) + 8\n\t\t\theader = struct.pack(\n\t\t\t\t\">LBBH\", length, self.coverage, self.format, self.tupleIndex)\n\t\treturn header + data\n\n\tdef toXML(self, writer, ttFont):\n\t\tattrs = dict(coverage=self.coverage, format=self.format)\n\t\tif self.apple:\n\t\t\tif self.tupleIndex is None:\n\t\t\t\tlog.warning(\"'tupleIndex' is None; default to 0\")\n\t\t\t\tattrs[\"tupleIndex\"] = 0\n\t\t\telse:\n\t\t\t\tattrs[\"tupleIndex\"] = self.tupleIndex\n\t\twriter.begintag(\"kernsubtable\", **attrs)\n\t\twriter.newline()\n\t\titems = sorted(self.kernTable.items())\n\t\tfor (left, right), value in items:\n\t\t\twriter.simpletag(\"pair\", [\n\t\t\t\t(\"l\", left),\n\t\t\t\t(\"r\", right),\n\t\t\t\t(\"v\", value)\n\t\t\t])\n\t\t\twriter.newline()\n\t\twriter.endtag(\"kernsubtable\")\n\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, 
content, ttFont):\n\t\tself.coverage = safeEval(attrs[\"coverage\"])\n\t\tsubtableFormat = safeEval(attrs[\"format\"])\n\t\tif self.apple:\n\t\t\tif \"tupleIndex\" in attrs:\n\t\t\t\tself.tupleIndex = safeEval(attrs[\"tupleIndex\"])\n\t\t\telse:\n\t\t\t\t# previous fontTools versions didn't export tupleIndex\n\t\t\t\tlog.warning(\n\t\t\t\t\t\"Apple kern subtable is missing 'tupleIndex' attribute\")\n\t\t\t\tself.tupleIndex = None\n\t\telse:\n\t\t\tself.tupleIndex = None\n\t\tassert subtableFormat == self.format, \"unsupported format\"\n\t\tif not hasattr(self, \"kernTable\"):\n\t\t\tself.kernTable = {}\n\t\tfor element in content:\n\t\t\tif not isinstance(element, tuple):\n\t\t\t\tcontinue\n\t\t\tname, attrs, content = element\n\t\t\tself.kernTable[(attrs[\"l\"], attrs[\"r\"])] = safeEval(attrs[\"v\"])\n\n\tdef __getitem__(self, pair):\n\t\treturn self.kernTable[pair]\n\n\tdef __setitem__(self, pair, value):\n\t\tself.kernTable[pair] = value\n\n\tdef __delitem__(self, pair):\n\t\tdel self.kernTable[pair]\n\n\nclass KernTable_format_unkown(object):\n\n\tdef __init__(self, format):\n\t\tself.format = format\n\n\tdef decompile(self, data, ttFont):\n\t\tself.data = data\n\n\tdef compile(self, ttFont):\n\t\treturn self.data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.begintag(\"kernsubtable\", format=self.format)\n\t\twriter.newline()\n\t\twriter.comment(\"unknown 'kern' subtable format\")\n\t\twriter.newline()\n\t\twriter.dumphex(self.data)\n\t\twriter.endtag(\"kernsubtable\")\n\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tself.decompile(readHex(content), ttFont)\n\n\nkern_classes = {0: KernTable_format_0}\n", "path": "Lib/fontTools/ttLib/tables/_k_e_r_n.py"}]} | 3,734 | 584 |
gh_patches_debug_26078 | rasdani/github-patches | git_diff | jazzband__pip-tools-491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Updating a specific package does not work.
Updating a specific package with the --upgrade-package or -P flag does not work.
##### Environment Versions
1. MacOS Sierra
2. Python version: 3.5.3
3. pip version: 9.0.1
4. pip-tools version: 1.8.0
##### Steps to replicate
1.
_req.in_
```
jinja2
markupsafe
werkzeug
```
_req.txt_
```
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file requirements/req.txt requirements/req.in
#
jinja2==2.7.3
markupsafe==0.23
werkzeug==0.6
```
2. Trying to update only jinja2: `pip-compile -P jinja2 requirements/req.in`
##### Expected result
_req.txt_
```
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file requirements/req.txt requirements/req.in
#
jinja2==2.9.5
markupsafe==0.23
werkzeug==0.6
```
##### Actual result
_req.txt_
```
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file requirements/req.txt requirements/req.in
#
jinja2==2.9.5
markupsafe==0.23
werkzeug==0.11.15
```
As you can see, werkzeug was updated as well.
--- END ISSUE ---
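
For context only (this sketch is not part of the quoted issue): a minimal, self-contained illustration of the pin-selection rule the reporter expects from `-P`. The package named with `-P` is dropped from the previously pinned set so the resolver may upgrade it, while every other pin is kept. The helper name `select_existing_pins` and the plain-dict representation are invented for the example; pip-tools itself works with `InstallRequirement` objects.

```python
# Illustrative sketch only: plain dicts stand in for pip's requirement
# objects, and `select_existing_pins` is an invented helper name.


def select_existing_pins(pinned, upgrade_packages):
    """Keep every existing pin except the packages requested via -P."""
    upgrade_keys = {name.lower() for name in upgrade_packages}
    return {
        name: version
        for name, version in pinned.items()
        if name.lower() not in upgrade_keys
    }


if __name__ == "__main__":
    previous = {"jinja2": "2.7.3", "markupsafe": "0.23", "werkzeug": "0.6"}
    pins = select_existing_pins(previous, upgrade_packages=["jinja2"])
    # jinja2 is no longer pre-pinned, so the resolver is free to pick 2.9.5;
    # markupsafe and werkzeug keep their previously pinned versions.
    assert pins == {"markupsafe": "0.23", "werkzeug": "0.6"}
    print(pins)
```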
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/scripts/compile.py`
Content:
```
1 # coding: utf-8
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4
5 import optparse
6 import os
7 import sys
8 import tempfile
9
10 import pip
11 from pip.req import InstallRequirement, parse_requirements
12
13 from .. import click
14 from ..exceptions import PipToolsError
15 from ..logging import log
16 from ..repositories import LocalRequirementsRepository, PyPIRepository
17 from ..resolver import Resolver
18 from ..utils import (assert_compatible_pip_version, is_pinned_requirement,
19 key_from_req, dedup)
20 from ..writer import OutputWriter
21
22 # Make sure we're using a compatible version of pip
23 assert_compatible_pip_version()
24
25 DEFAULT_REQUIREMENTS_FILE = 'requirements.in'
26
27
28 class PipCommand(pip.basecommand.Command):
29 name = 'PipCommand'
30
31
32 @click.command()
33 @click.version_option()
34 @click.option('-v', '--verbose', is_flag=True, help="Show more output")
35 @click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
36 @click.option('-p', '--pre', is_flag=True, default=None, help="Allow resolving to prereleases (default is not)")
37 @click.option('-r', '--rebuild', is_flag=True, help="Clear any caches upfront, rebuild from scratch")
38 @click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa
39 @click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)", envvar='PIP_INDEX_URL')
40 @click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa
41 @click.option('--client-cert', help="Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.") # noqa
42 @click.option('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',
43 help="Mark this host as trusted, even though it does not have "
44 "valid or any HTTPS.")
45 @click.option('--header/--no-header', is_flag=True, default=True,
46 help="Add header to generated file")
47 @click.option('--index/--no-index', is_flag=True, default=True,
48 help="Add index URL to generated file")
49 @click.option('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,
50 default=True, help="Add trusted host option to generated file")
51 @click.option('--annotate/--no-annotate', is_flag=True, default=True,
52 help="Annotate results, indicating where dependencies come from")
53 @click.option('-U', '--upgrade', is_flag=True, default=False,
54 help='Try to upgrade all dependencies to their latest versions')
55 @click.option('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,
56 help="Specify particular packages to upgrade.")
57 @click.option('-o', '--output-file', nargs=1, type=str, default=None,
58 help=('Output file name. Required if more than one input file is given. '
59 'Will be derived from input file otherwise.'))
60 @click.option('--allow-unsafe', is_flag=True, default=False,
61 help="Pin packages considered unsafe: pip, setuptools & distribute")
62 @click.option('--generate-hashes', is_flag=True, default=False,
63 help="Generate pip 8 style hashes in the resulting requirements file.")
64 @click.option('--max-rounds', default=10,
65 help="Maximum number of rounds before resolving the requirements aborts.")
66 @click.argument('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))
67 def cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,
68 client_cert, trusted_host, header, index, emit_trusted_host, annotate,
69 upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,
70 src_files, max_rounds):
71 """Compiles requirements.txt from requirements.in specs."""
72 log.verbose = verbose
73
74 if len(src_files) == 0:
75 if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
76 src_files = (DEFAULT_REQUIREMENTS_FILE,)
77 elif os.path.exists('setup.py'):
78 src_files = ('setup.py',)
79 if not output_file:
80 output_file = 'requirements.txt'
81 else:
82 raise click.BadParameter(("If you do not specify an input file, "
83 "the default is {} or setup.py").format(DEFAULT_REQUIREMENTS_FILE))
84
85 if len(src_files) == 1 and src_files[0] == '-':
86 if not output_file:
87 raise click.BadParameter('--output-file is required if input is from stdin')
88
89 if len(src_files) > 1 and not output_file:
90 raise click.BadParameter('--output-file is required if two or more input files are given.')
91
92 if output_file:
93 dst_file = output_file
94 else:
95 base_name = src_files[0].rsplit('.', 1)[0]
96 dst_file = base_name + '.txt'
97
98 if upgrade and upgrade_packages:
99 raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')
100
101 ###
102 # Setup
103 ###
104
105 pip_command = get_pip_command()
106
107 pip_args = []
108 if find_links:
109 for link in find_links:
110 pip_args.extend(['-f', link])
111 if index_url:
112 pip_args.extend(['-i', index_url])
113 if extra_index_url:
114 for extra_index in extra_index_url:
115 pip_args.extend(['--extra-index-url', extra_index])
116 if client_cert:
117 pip_args.extend(['--client-cert', client_cert])
118 if pre:
119 pip_args.extend(['--pre'])
120 if trusted_host:
121 for host in trusted_host:
122 pip_args.extend(['--trusted-host', host])
123
124 pip_options, _ = pip_command.parse_args(pip_args)
125
126 session = pip_command._build_session(pip_options)
127 repository = PyPIRepository(pip_options, session)
128
129 # Pre-parse the inline package upgrade specs: they should take precedence
130 # over the stuff in the requirements files
131 upgrade_packages = [InstallRequirement.from_line(pkg)
132 for pkg in upgrade_packages]
133
134 # Proxy with a LocalRequirementsRepository if --upgrade is not specified
135 # (= default invocation)
136 if not (upgrade or upgrade_packages) and os.path.exists(dst_file):
137 ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)
138 existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}
139 repository = LocalRequirementsRepository(existing_pins, repository)
140
141 log.debug('Using indexes:')
142 # remove duplicate index urls before processing
143 repository.finder.index_urls = list(dedup(repository.finder.index_urls))
144 for index_url in repository.finder.index_urls:
145 log.debug(' {}'.format(index_url))
146
147 if repository.finder.find_links:
148 log.debug('')
149 log.debug('Configuration:')
150 for find_link in repository.finder.find_links:
151 log.debug(' -f {}'.format(find_link))
152
153 ###
154 # Parsing/collecting initial requirements
155 ###
156
157 constraints = []
158 for src_file in src_files:
159 is_setup_file = os.path.basename(src_file) == 'setup.py'
160 if is_setup_file or src_file == '-':
161 # pip requires filenames and not files. Since we want to support
162 # piping from stdin, we need to briefly save the input from stdin
163 # to a temporary file and have pip read that. also used for
164 # reading requirements from install_requires in setup.py.
165 tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)
166 if is_setup_file:
167 from distutils.core import run_setup
168 dist = run_setup(src_file)
169 tmpfile.write('\n'.join(dist.install_requires))
170 else:
171 tmpfile.write(sys.stdin.read())
172 tmpfile.flush()
173 constraints.extend(parse_requirements(
174 tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))
175 else:
176 constraints.extend(parse_requirements(
177 src_file, finder=repository.finder, session=repository.session, options=pip_options))
178
179 # Check the given base set of constraints first
180 Resolver.check_constraints(constraints)
181
182 try:
183 resolver = Resolver(constraints, repository, prereleases=pre,
184 clear_caches=rebuild, allow_unsafe=allow_unsafe)
185 results = resolver.resolve(max_rounds=max_rounds)
186 if generate_hashes:
187 hashes = resolver.resolve_hashes(results)
188 else:
189 hashes = None
190 except PipToolsError as e:
191 log.error(str(e))
192 sys.exit(2)
193
194 log.debug('')
195
196 ##
197 # Output
198 ##
199
200 # Compute reverse dependency annotations statically, from the
201 # dependency cache that the resolver has populated by now.
202 #
203 # TODO (1a): reverse deps for any editable package are lost
204 # what SHOULD happen is that they are cached in memory, just
205 # not persisted to disk!
206 #
207 # TODO (1b): perhaps it's easiest if the dependency cache has an API
208 # that could take InstallRequirements directly, like:
209 #
210 # cache.set(ireq, ...)
211 #
212 # then, when ireq is editable, it would store in
213 #
214 # editables[egg_name][link_without_fragment] = deps
215 # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}
216 #
217 # otherwise:
218 #
219 # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}
220 #
221 reverse_dependencies = None
222 if annotate:
223 reverse_dependencies = resolver.reverse_dependencies(results)
224
225 writer = OutputWriter(src_files, dst_file, dry_run=dry_run,
226 emit_header=header, emit_index=index,
227 emit_trusted_host=emit_trusted_host,
228 annotate=annotate,
229 generate_hashes=generate_hashes,
230 default_index_url=repository.DEFAULT_INDEX_URL,
231 index_urls=repository.finder.index_urls,
232 trusted_hosts=pip_options.trusted_hosts,
233 format_control=repository.finder.format_control)
234 writer.write(results=results,
235 reverse_dependencies=reverse_dependencies,
236 primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},
237 markers={key_from_req(ireq.req): ireq.markers
238 for ireq in constraints if ireq.markers},
239 hashes=hashes)
240
241 if dry_run:
242 log.warning('Dry-run, so nothing updated.')
243
244
245 def get_pip_command():
246 # Use pip's parser for pip.conf management and defaults.
247 # General options (find_links, index_url, extra_index_url, trusted_host,
248 # and pre) are defered to pip.
249 pip_command = PipCommand()
250 index_opts = pip.cmdoptions.make_option_group(
251 pip.cmdoptions.index_group,
252 pip_command.parser,
253 )
254 pip_command.parser.insert_option_group(0, index_opts)
255 pip_command.parser.add_option(optparse.Option('--pre', action='store_true', default=False))
256
257 return pip_command
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py
--- a/piptools/scripts/compile.py
+++ b/piptools/scripts/compile.py
@@ -126,16 +126,15 @@
session = pip_command._build_session(pip_options)
repository = PyPIRepository(pip_options, session)
- # Pre-parse the inline package upgrade specs: they should take precedence
- # over the stuff in the requirements files
- upgrade_packages = [InstallRequirement.from_line(pkg)
- for pkg in upgrade_packages]
-
# Proxy with a LocalRequirementsRepository if --upgrade is not specified
# (= default invocation)
- if not (upgrade or upgrade_packages) and os.path.exists(dst_file):
+ if not upgrade and os.path.exists(dst_file):
ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)
- existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}
+ # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.
+ upgrade_pkgs_key = {key_from_req(InstallRequirement.from_line(pkg).req) for pkg in upgrade_packages}
+ existing_pins = {key_from_req(ireq.req): ireq
+ for ireq in ireqs
+ if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_pkgs_key}
repository = LocalRequirementsRepository(existing_pins, repository)
log.debug('Using indexes:')
| {"golden_diff": "diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py\n--- a/piptools/scripts/compile.py\n+++ b/piptools/scripts/compile.py\n@@ -126,16 +126,15 @@\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n \n- # Pre-parse the inline package upgrade specs: they should take precedence\n- # over the stuff in the requirements files\n- upgrade_packages = [InstallRequirement.from_line(pkg)\n- for pkg in upgrade_packages]\n-\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n- if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n+ if not upgrade and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n- existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n+ # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.\n+ upgrade_pkgs_key = {key_from_req(InstallRequirement.from_line(pkg).req) for pkg in upgrade_packages}\n+ existing_pins = {key_from_req(ireq.req): ireq\n+ for ireq in ireqs\n+ if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_pkgs_key}\n repository = LocalRequirementsRepository(existing_pins, repository)\n \n log.debug('Using indexes:')\n", "issue": "Update a specific package does not work.\nUpdate a specific package --upgrade-package or -P flag does not work.\r\n\r\n##### Environment Versions\r\n\r\n1. MacOS Sierra\r\n2. Python version: 3.5.3\r\n3. pip version: 9.0.1\r\n4. pip-tools version: 1.8.0\r\n\r\n##### Steps to replicate\r\n\r\n1. \r\n_req.in_\r\n ```\r\njinja2\r\nmarkupsafe\r\nwerkzeug\r\n```\r\n\r\n_req.txt_\r\n```\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file requirements/req.txt requirements/req.in\r\n#\r\njinja2==2.7.3\r\nmarkupsafe==0.23\r\nwerkzeug==0.6\r\n```\r\n2. Trying to update only jinja2: `pip-compile -P jinja2 requirements/req.in`\r\n\r\n##### Expected result\r\n_req.txt_\r\n```\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file requirements/req.txt requirements/req.in\r\n#\r\njinja2==2.9.5\r\nmarkupsafe==0.23\r\nwerkzeug==0.6\r\n```\r\n##### Actual result\r\n_req.txt_\r\n```\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file requirements/req.txt requirements/req.in\r\n#\r\njinja2==2.9.5\r\nmarkupsafe==0.23\r\nwerkzeug==0.11.15\r\n```\r\nAs you can see, werkzeug was updated as well.\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. 
import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name = src_files[0].rsplit('.', 1)[0]\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Pre-parse the inline package upgrade specs: they should take precedence\n # over the stuff in the requirements files\n upgrade_packages = [InstallRequirement.from_line(pkg)\n for pkg in upgrade_packages]\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not (upgrade or upgrade_packages) and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n existing_pins = {key_from_req(ireq.req): ireq for ireq in ireqs if is_pinned_requirement(ireq)}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', 
default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport optparse\nimport os\nimport sys\nimport tempfile\n\nimport pip\nfrom pip.req import InstallRequirement, parse_requirements\n\nfrom .. import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (assert_compatible_pip_version, is_pinned_requirement,\n key_from_req, dedup)\nfrom ..writer import OutputWriter\n\n# Make sure we're using a compatible version of pip\nassert_compatible_pip_version()\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\nclass PipCommand(pip.basecommand.Command):\n name = 'PipCommand'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', is_flag=True, help=\"Show more output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to PyPI)\", envvar='PIP_INDEX_URL')\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: pip, setuptools & distribute\")\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbose = verbose\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name = src_files[0].rsplit('.', 1)[0]\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not upgrade and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.\n upgrade_pkgs_key = {key_from_req(InstallRequirement.from_line(pkg).req) for pkg in upgrade_packages}\n existing_pins = {key_from_req(ireq.req): ireq\n for ireq in ireqs\n if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_pkgs_key}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in 
repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control)\n writer.write(results=results,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n\n\ndef get_pip_command():\n # Use pip's parser for pip.conf management and defaults.\n # General options (find_links, index_url, extra_index_url, trusted_host,\n # and pre) are defered to pip.\n pip_command = PipCommand()\n index_opts = pip.cmdoptions.make_option_group(\n pip.cmdoptions.index_group,\n pip_command.parser,\n )\n pip_command.parser.insert_option_group(0, index_opts)\n pip_command.parser.add_option(optparse.Option('--pre', action='store_true', 
default=False))\n\n return pip_command\n", "path": "piptools/scripts/compile.py"}]} | 3,709 | 356 |
gh_patches_debug_43497 | rasdani/github-patches | git_diff | ray-project__ray-4114 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] make rollout script support multiagent
Hi,
If I'm correct, only a single agent/policy is currently supported in rollout.py. For instance, https://github.com/ray-project/ray/blob/2e30f7ba386e716bf80f019dcd473b67d83abb95/python/ray/rllib/rollout.py#L109-L110 references the default policy to check whether the policy uses an LSTM, which fails when a multi-agent configuration is loaded.
Thanks!
--- END ISSUE ---
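
As context for the issue above, a short sketch (all names and values invented for illustration, not RLlib objects) of why a hard-coded `"default"` key fails once a multi-agent config is loaded: the policy map is keyed by user-defined policy ids, and agents are routed to policies through a mapping function.

```python
# Illustrative sketch only; the policy ids, agent ids, and mapping are invented.
policy_map = {"policy_a": "policy-object-a", "policy_b": "policy-object-b"}


def policy_mapping_fn(agent_id):
    # Route agent_0 to policy_a and everything else to policy_b.
    return "policy_a" if agent_id.endswith("0") else "policy_b"


# The single-agent style lookup would raise KeyError here:
assert "default" not in policy_map

# The multi-agent lookup resolves each agent id through the mapping function.
for agent_id in ("agent_0", "agent_1"):
    policy = policy_map[policy_mapping_fn(agent_id)]
    print(agent_id, "->", policy)
```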
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/rllib/rollout.py`
Content:
```
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6
7 import argparse
8 import json
9 import os
10 import pickle
11
12 import gym
13 import ray
14 from ray.rllib.agents.registry import get_agent_class
15
16 EXAMPLE_USAGE = """
17 Example Usage via RLlib CLI:
18 rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
19 --env CartPole-v0 --steps 1000000 --out rollouts.pkl
20
21 Example Usage via executable:
22 ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
23 --env CartPole-v0 --steps 1000000 --out rollouts.pkl
24 """
25
26 # Note: if you use any custom models or envs, register them here first, e.g.:
27 #
28 # ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
29 # register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
30
31
32 def create_parser(parser_creator=None):
33 parser_creator = parser_creator or argparse.ArgumentParser
34 parser = parser_creator(
35 formatter_class=argparse.RawDescriptionHelpFormatter,
36 description="Roll out a reinforcement learning agent "
37 "given a checkpoint.",
38 epilog=EXAMPLE_USAGE)
39
40 parser.add_argument(
41 "checkpoint", type=str, help="Checkpoint from which to roll out.")
42 required_named = parser.add_argument_group("required named arguments")
43 required_named.add_argument(
44 "--run",
45 type=str,
46 required=True,
47 help="The algorithm or model to train. This may refer to the name "
48 "of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
49 "user-defined trainable function or class registered in the "
50 "tune registry.")
51 required_named.add_argument(
52 "--env", type=str, help="The gym environment to use.")
53 parser.add_argument(
54 "--no-render",
55 default=False,
56 action="store_const",
57 const=True,
58 help="Surpress rendering of the environment.")
59 parser.add_argument(
60 "--steps", default=10000, help="Number of steps to roll out.")
61 parser.add_argument("--out", default=None, help="Output filename.")
62 parser.add_argument(
63 "--config",
64 default="{}",
65 type=json.loads,
66 help="Algorithm-specific configuration (e.g. env, hyperparams). "
67 "Surpresses loading of configuration from checkpoint.")
68 return parser
69
70
71 def run(args, parser):
72 config = args.config
73 if not config:
74 # Load configuration from file
75 config_dir = os.path.dirname(args.checkpoint)
76 config_path = os.path.join(config_dir, "params.json")
77 if not os.path.exists(config_path):
78 config_path = os.path.join(config_dir, "../params.json")
79 if not os.path.exists(config_path):
80 raise ValueError(
81 "Could not find params.json in either the checkpoint dir or "
82 "its parent directory.")
83 with open(config_path) as f:
84 config = json.load(f)
85 if "num_workers" in config:
86 config["num_workers"] = min(2, config["num_workers"])
87
88 if not args.env:
89 if not config.get("env"):
90 parser.error("the following arguments are required: --env")
91 args.env = config.get("env")
92
93 ray.init()
94
95 cls = get_agent_class(args.run)
96 agent = cls(env=args.env, config=config)
97 agent.restore(args.checkpoint)
98 num_steps = int(args.steps)
99 rollout(agent, args.env, num_steps, args.out, args.no_render)
100
101
102 def rollout(agent, env_name, num_steps, out=None, no_render=True):
103 if hasattr(agent, "local_evaluator"):
104 env = agent.local_evaluator.env
105 else:
106 env = gym.make(env_name)
107
108 if hasattr(agent, "local_evaluator"):
109 state_init = agent.local_evaluator.policy_map[
110 "default"].get_initial_state()
111 else:
112 state_init = []
113 if state_init:
114 use_lstm = True
115 else:
116 use_lstm = False
117
118 if out is not None:
119 rollouts = []
120 steps = 0
121 while steps < (num_steps or steps + 1):
122 if out is not None:
123 rollout = []
124 state = env.reset()
125 done = False
126 reward_total = 0.0
127 while not done and steps < (num_steps or steps + 1):
128 if use_lstm:
129 action, state_init, logits = agent.compute_action(
130 state, state=state_init)
131 else:
132 action = agent.compute_action(state)
133 next_state, reward, done, _ = env.step(action)
134 reward_total += reward
135 if not no_render:
136 env.render()
137 if out is not None:
138 rollout.append([state, action, next_state, reward, done])
139 steps += 1
140 state = next_state
141 if out is not None:
142 rollouts.append(rollout)
143 print("Episode reward", reward_total)
144 if out is not None:
145 pickle.dump(rollouts, open(out, "wb"))
146
147
148 if __name__ == "__main__":
149 parser = create_parser()
150 args = parser.parse_args()
151 run(args, parser)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py
--- a/python/ray/rllib/rollout.py
+++ b/python/ray/rllib/rollout.py
@@ -73,15 +73,15 @@
if not config:
# Load configuration from file
config_dir = os.path.dirname(args.checkpoint)
- config_path = os.path.join(config_dir, "params.json")
+ config_path = os.path.join(config_dir, "params.pkl")
if not os.path.exists(config_path):
- config_path = os.path.join(config_dir, "../params.json")
+ config_path = os.path.join(config_dir, "../params.pkl")
if not os.path.exists(config_path):
raise ValueError(
- "Could not find params.json in either the checkpoint dir or "
+ "Could not find params.pkl in either the checkpoint dir or "
"its parent directory.")
- with open(config_path) as f:
- config = json.load(f)
+ with open(config_path, 'rb') as f:
+ config = pickle.load(f)
if "num_workers" in config:
config["num_workers"] = min(2, config["num_workers"])
@@ -102,18 +102,18 @@
def rollout(agent, env_name, num_steps, out=None, no_render=True):
if hasattr(agent, "local_evaluator"):
env = agent.local_evaluator.env
+ multiagent = agent.local_evaluator.multiagent
+ if multiagent:
+ policy_agent_mapping = agent.config["multiagent"][
+ "policy_mapping_fn"]
+ mapping_cache = {}
+ policy_map = agent.local_evaluator.policy_map
+ state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
+ use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
else:
env = gym.make(env_name)
-
- if hasattr(agent, "local_evaluator"):
- state_init = agent.local_evaluator.policy_map[
- "default"].get_initial_state()
- else:
- state_init = []
- if state_init:
- use_lstm = True
- else:
- use_lstm = False
+ multiagent = False
+ use_lstm = {'default': False}
if out is not None:
rollouts = []
@@ -125,13 +125,39 @@
done = False
reward_total = 0.0
while not done and steps < (num_steps or steps + 1):
- if use_lstm:
- action, state_init, logits = agent.compute_action(
- state, state=state_init)
+ if multiagent:
+ action_dict = {}
+ for agent_id in state.keys():
+ a_state = state[agent_id]
+ if a_state is not None:
+ policy_id = mapping_cache.setdefault(
+ agent_id, policy_agent_mapping(agent_id))
+ p_use_lstm = use_lstm[policy_id]
+ if p_use_lstm:
+ a_action, p_state_init, _ = agent.compute_action(
+ a_state,
+ state=state_init[policy_id],
+ policy_id=policy_id)
+ state_init[policy_id] = p_state_init
+ else:
+ a_action = agent.compute_action(
+ a_state, policy_id=policy_id)
+ action_dict[agent_id] = a_action
+ action = action_dict
else:
- action = agent.compute_action(state)
+ if use_lstm["default"]:
+ action, state_init, _ = agent.compute_action(
+ state, state=state_init)
+ else:
+ action = agent.compute_action(state)
+
next_state, reward, done, _ = env.step(action)
- reward_total += reward
+
+ if multiagent:
+ done = done["__all__"]
+ reward_total += sum(reward.values())
+ else:
+ reward_total += reward
if not no_render:
env.render()
if out is not None:
@@ -141,6 +167,7 @@
if out is not None:
rollouts.append(rollout)
print("Episode reward", reward_total)
+
if out is not None:
pickle.dump(rollouts, open(out, "wb"))
| {"golden_diff": "diff --git a/python/ray/rllib/rollout.py b/python/ray/rllib/rollout.py\n--- a/python/ray/rllib/rollout.py\n+++ b/python/ray/rllib/rollout.py\n@@ -73,15 +73,15 @@\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n- config_path = os.path.join(config_dir, \"params.json\")\n+ config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n- config_path = os.path.join(config_dir, \"../params.json\")\n+ config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n raise ValueError(\n- \"Could not find params.json in either the checkpoint dir or \"\n+ \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n- with open(config_path) as f:\n- config = json.load(f)\n+ with open(config_path, 'rb') as f:\n+ config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n \n@@ -102,18 +102,18 @@\n def rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n+ multiagent = agent.local_evaluator.multiagent\n+ if multiagent:\n+ policy_agent_mapping = agent.config[\"multiagent\"][\n+ \"policy_mapping_fn\"]\n+ mapping_cache = {}\n+ policy_map = agent.local_evaluator.policy_map\n+ state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n+ use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n else:\n env = gym.make(env_name)\n-\n- if hasattr(agent, \"local_evaluator\"):\n- state_init = agent.local_evaluator.policy_map[\n- \"default\"].get_initial_state()\n- else:\n- state_init = []\n- if state_init:\n- use_lstm = True\n- else:\n- use_lstm = False\n+ multiagent = False\n+ use_lstm = {'default': False}\n \n if out is not None:\n rollouts = []\n@@ -125,13 +125,39 @@\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n- if use_lstm:\n- action, state_init, logits = agent.compute_action(\n- state, state=state_init)\n+ if multiagent:\n+ action_dict = {}\n+ for agent_id in state.keys():\n+ a_state = state[agent_id]\n+ if a_state is not None:\n+ policy_id = mapping_cache.setdefault(\n+ agent_id, policy_agent_mapping(agent_id))\n+ p_use_lstm = use_lstm[policy_id]\n+ if p_use_lstm:\n+ a_action, p_state_init, _ = agent.compute_action(\n+ a_state,\n+ state=state_init[policy_id],\n+ policy_id=policy_id)\n+ state_init[policy_id] = p_state_init\n+ else:\n+ a_action = agent.compute_action(\n+ a_state, policy_id=policy_id)\n+ action_dict[agent_id] = a_action\n+ action = action_dict\n else:\n- action = agent.compute_action(state)\n+ if use_lstm[\"default\"]:\n+ action, state_init, _ = agent.compute_action(\n+ state, state=state_init)\n+ else:\n+ action = agent.compute_action(state)\n+\n next_state, reward, done, _ = env.step(action)\n- reward_total += reward\n+\n+ if multiagent:\n+ done = done[\"__all__\"]\n+ reward_total += sum(reward.values())\n+ else:\n+ reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n@@ -141,6 +167,7 @@\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n+\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n", "issue": "[rllib] make rollout script support multiagent \nHi,\r\n\r\nIf I'm correct, only single agent/policy is currently supported in rollout.py. 
For instance https://github.com/ray-project/ray/blob/2e30f7ba386e716bf80f019dcd473b67d83abb95/python/ray/rllib/rollout.py#L109-L110 references default policy to check if policy uses lstm, which fails when a multi agent configuration is loaded.\r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport pickle\n\nimport gym\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage via RLlib CLI:\n rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\nExample Usage via executable:\n ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\"\"\"\n\n# Note: if you use any custom models or envs, register them here first, e.g.:\n#\n# ModelCatalog.register_custom_model(\"pa_model\", ParametricActionsModel)\n# register_env(\"pa_cartpole\", lambda _: ParametricActionCartpole(10))\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). 
\"\n \"Surpresses loading of configuration from checkpoint.\")\n return parser\n\n\ndef run(args, parser):\n config = args.config\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.json\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.json\")\n if not os.path.exists(config_path):\n raise ValueError(\n \"Could not find params.json in either the checkpoint dir or \"\n \"its parent directory.\")\n with open(config_path) as f:\n config = json.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n rollout(agent, args.env, num_steps, args.out, args.no_render)\n\n\ndef rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n else:\n env = gym.make(env_name)\n\n if hasattr(agent, \"local_evaluator\"):\n state_init = agent.local_evaluator.policy_map[\n \"default\"].get_initial_state()\n else:\n state_init = []\n if state_init:\n use_lstm = True\n else:\n use_lstm = False\n\n if out is not None:\n rollouts = []\n steps = 0\n while steps < (num_steps or steps + 1):\n if out is not None:\n rollout = []\n state = env.reset()\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n if use_lstm:\n action, state_init, logits = agent.compute_action(\n state, state=state_init)\n else:\n action = agent.compute_action(state)\n next_state, reward, done, _ = env.step(action)\n reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n rollout.append([state, action, next_state, reward, done])\n steps += 1\n state = next_state\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "python/ray/rllib/rollout.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport pickle\n\nimport gym\nimport ray\nfrom ray.rllib.agents.registry import get_agent_class\n\nEXAMPLE_USAGE = \"\"\"\nExample Usage via RLlib CLI:\n rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\nExample Usage via executable:\n ./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN\n --env CartPole-v0 --steps 1000000 --out rollouts.pkl\n\"\"\"\n\n# Note: if you use any custom models or envs, register them here first, e.g.:\n#\n# ModelCatalog.register_custom_model(\"pa_model\", ParametricActionsModel)\n# register_env(\"pa_cartpole\", lambda _: ParametricActionCartpole(10))\n\n\ndef create_parser(parser_creator=None):\n parser_creator = parser_creator or argparse.ArgumentParser\n parser = parser_creator(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Roll out a reinforcement learning agent \"\n \"given a checkpoint.\",\n epilog=EXAMPLE_USAGE)\n\n 
parser.add_argument(\n \"checkpoint\", type=str, help=\"Checkpoint from which to roll out.\")\n required_named = parser.add_argument_group(\"required named arguments\")\n required_named.add_argument(\n \"--run\",\n type=str,\n required=True,\n help=\"The algorithm or model to train. This may refer to the name \"\n \"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a \"\n \"user-defined trainable function or class registered in the \"\n \"tune registry.\")\n required_named.add_argument(\n \"--env\", type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--no-render\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Surpress rendering of the environment.\")\n parser.add_argument(\n \"--steps\", default=10000, help=\"Number of steps to roll out.\")\n parser.add_argument(\"--out\", default=None, help=\"Output filename.\")\n parser.add_argument(\n \"--config\",\n default=\"{}\",\n type=json.loads,\n help=\"Algorithm-specific configuration (e.g. env, hyperparams). \"\n \"Surpresses loading of configuration from checkpoint.\")\n return parser\n\n\ndef run(args, parser):\n config = args.config\n if not config:\n # Load configuration from file\n config_dir = os.path.dirname(args.checkpoint)\n config_path = os.path.join(config_dir, \"params.pkl\")\n if not os.path.exists(config_path):\n config_path = os.path.join(config_dir, \"../params.pkl\")\n if not os.path.exists(config_path):\n raise ValueError(\n \"Could not find params.pkl in either the checkpoint dir or \"\n \"its parent directory.\")\n with open(config_path, 'rb') as f:\n config = pickle.load(f)\n if \"num_workers\" in config:\n config[\"num_workers\"] = min(2, config[\"num_workers\"])\n\n if not args.env:\n if not config.get(\"env\"):\n parser.error(\"the following arguments are required: --env\")\n args.env = config.get(\"env\")\n\n ray.init()\n\n cls = get_agent_class(args.run)\n agent = cls(env=args.env, config=config)\n agent.restore(args.checkpoint)\n num_steps = int(args.steps)\n rollout(agent, args.env, num_steps, args.out, args.no_render)\n\n\ndef rollout(agent, env_name, num_steps, out=None, no_render=True):\n if hasattr(agent, \"local_evaluator\"):\n env = agent.local_evaluator.env\n multiagent = agent.local_evaluator.multiagent\n if multiagent:\n policy_agent_mapping = agent.config[\"multiagent\"][\n \"policy_mapping_fn\"]\n mapping_cache = {}\n policy_map = agent.local_evaluator.policy_map\n state_init = {p: m.get_initial_state() for p, m in policy_map.items()}\n use_lstm = {p: len(s) > 0 for p, s in state_init.items()}\n else:\n env = gym.make(env_name)\n multiagent = False\n use_lstm = {'default': False}\n\n if out is not None:\n rollouts = []\n steps = 0\n while steps < (num_steps or steps + 1):\n if out is not None:\n rollout = []\n state = env.reset()\n done = False\n reward_total = 0.0\n while not done and steps < (num_steps or steps + 1):\n if multiagent:\n action_dict = {}\n for agent_id in state.keys():\n a_state = state[agent_id]\n if a_state is not None:\n policy_id = mapping_cache.setdefault(\n agent_id, policy_agent_mapping(agent_id))\n p_use_lstm = use_lstm[policy_id]\n if p_use_lstm:\n a_action, p_state_init, _ = agent.compute_action(\n a_state,\n state=state_init[policy_id],\n policy_id=policy_id)\n state_init[policy_id] = p_state_init\n else:\n a_action = agent.compute_action(\n a_state, policy_id=policy_id)\n action_dict[agent_id] = a_action\n action = action_dict\n else:\n if use_lstm[\"default\"]:\n action, state_init, _ = agent.compute_action(\n state, 
state=state_init)\n else:\n action = agent.compute_action(state)\n\n next_state, reward, done, _ = env.step(action)\n\n if multiagent:\n done = done[\"__all__\"]\n reward_total += sum(reward.values())\n else:\n reward_total += reward\n if not no_render:\n env.render()\n if out is not None:\n rollout.append([state, action, next_state, reward, done])\n steps += 1\n state = next_state\n if out is not None:\n rollouts.append(rollout)\n print(\"Episode reward\", reward_total)\n\n if out is not None:\n pickle.dump(rollouts, open(out, \"wb\"))\n\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n run(args, parser)\n", "path": "python/ray/rllib/rollout.py"}]} | 1,876 | 967 |
gh_patches_debug_18727 | rasdani/github-patches | git_diff | scrapy__scrapy-2847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redirect 308 missing
I did a check on the RedirectMiddleware and noticed that code 308 is missing. Is there a reason for that?
Some websites don't update their sitemaps and end up serving a long list of 308 redirects from http to https.
(side note: is there a way to add an "s", i.e. rewrite http to https, before a link is scraped?)
--- END ISSUE ---
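One way to handle the side note above (forcing "https" before a link is fetched) is a small downloader middleware that rewrites the request URL. This is only a sketch; the class name and the priority used to enable it are illustrative, not Scrapy built-ins:

```python
class ForceHttpsMiddleware(object):
    """Rewrite plain-http requests to https before they are downloaded."""

    def process_request(self, request, spider):
        if request.url.startswith('http://'):
            # Request objects are immutable; replace() returns a new request,
            # which Scrapy reschedules through the middleware chain.
            return request.replace(url='https://' + request.url[len('http://'):])
        return None
```

It would be enabled with something like `DOWNLOADER_MIDDLEWARES = {'myproject.middlewares.ForceHttpsMiddleware': 543}` in the project settings (the module path is an assumption about the project layout).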
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/redirect.py`
Content:
```
1 import logging
2 from six.moves.urllib.parse import urljoin
3
4 from w3lib.url import safe_url_string
5
6 from scrapy.http import HtmlResponse
7 from scrapy.utils.response import get_meta_refresh
8 from scrapy.exceptions import IgnoreRequest, NotConfigured
9
10 logger = logging.getLogger(__name__)
11
12
13 class BaseRedirectMiddleware(object):
14
15 enabled_setting = 'REDIRECT_ENABLED'
16
17 def __init__(self, settings):
18 if not settings.getbool(self.enabled_setting):
19 raise NotConfigured
20
21 self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
22 self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
23
24 @classmethod
25 def from_crawler(cls, crawler):
26 return cls(crawler.settings)
27
28 def _redirect(self, redirected, request, spider, reason):
29 ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
30 redirects = request.meta.get('redirect_times', 0) + 1
31
32 if ttl and redirects <= self.max_redirect_times:
33 redirected.meta['redirect_times'] = redirects
34 redirected.meta['redirect_ttl'] = ttl - 1
35 redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
36 [request.url]
37 redirected.dont_filter = request.dont_filter
38 redirected.priority = request.priority + self.priority_adjust
39 logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s",
40 {'reason': reason, 'redirected': redirected, 'request': request},
41 extra={'spider': spider})
42 return redirected
43 else:
44 logger.debug("Discarding %(request)s: max redirections reached",
45 {'request': request}, extra={'spider': spider})
46 raise IgnoreRequest("max redirections reached")
47
48 def _redirect_request_using_get(self, request, redirect_url):
49 redirected = request.replace(url=redirect_url, method='GET', body='')
50 redirected.headers.pop('Content-Type', None)
51 redirected.headers.pop('Content-Length', None)
52 return redirected
53
54
55 class RedirectMiddleware(BaseRedirectMiddleware):
56 """
57 Handle redirection of requests based on response status
58 and meta-refresh html tag.
59 """
60 def process_response(self, request, response, spider):
61 if (request.meta.get('dont_redirect', False) or
62 response.status in getattr(spider, 'handle_httpstatus_list', []) or
63 response.status in request.meta.get('handle_httpstatus_list', []) or
64 request.meta.get('handle_httpstatus_all', False)):
65 return response
66
67 allowed_status = (301, 302, 303, 307)
68 if 'Location' not in response.headers or response.status not in allowed_status:
69 return response
70
71 location = safe_url_string(response.headers['location'])
72
73 redirected_url = urljoin(request.url, location)
74
75 if response.status in (301, 307) or request.method == 'HEAD':
76 redirected = request.replace(url=redirected_url)
77 return self._redirect(redirected, request, spider, response.status)
78
79 redirected = self._redirect_request_using_get(request, redirected_url)
80 return self._redirect(redirected, request, spider, response.status)
81
82
83 class MetaRefreshMiddleware(BaseRedirectMiddleware):
84
85 enabled_setting = 'METAREFRESH_ENABLED'
86
87 def __init__(self, settings):
88 super(MetaRefreshMiddleware, self).__init__(settings)
89 self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
90 settings.getint('METAREFRESH_MAXDELAY'))
91
92 def process_response(self, request, response, spider):
93 if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \
94 not isinstance(response, HtmlResponse):
95 return response
96
97 interval, url = get_meta_refresh(response)
98 if url and interval < self._maxdelay:
99 redirected = self._redirect_request_using_get(request, url)
100 return self._redirect(redirected, request, spider, 'meta refresh')
101
102 return response
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py
--- a/scrapy/downloadermiddlewares/redirect.py
+++ b/scrapy/downloadermiddlewares/redirect.py
@@ -64,7 +64,7 @@
request.meta.get('handle_httpstatus_all', False)):
return response
- allowed_status = (301, 302, 303, 307)
+ allowed_status = (301, 302, 303, 307, 308)
if 'Location' not in response.headers or response.status not in allowed_status:
return response
@@ -72,7 +72,7 @@
redirected_url = urljoin(request.url, location)
- if response.status in (301, 307) or request.method == 'HEAD':
+ if response.status in (301, 307, 308) or request.method == 'HEAD':
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
| {"golden_diff": "diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py\n--- a/scrapy/downloadermiddlewares/redirect.py\n+++ b/scrapy/downloadermiddlewares/redirect.py\n@@ -64,7 +64,7 @@\n request.meta.get('handle_httpstatus_all', False)):\n return response\n \n- allowed_status = (301, 302, 303, 307)\n+ allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n \n@@ -72,7 +72,7 @@\n \n redirected_url = urljoin(request.url, location)\n \n- if response.status in (301, 307) or request.method == 'HEAD':\n+ if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n", "issue": "Redirect 308 missing\nI did a check on the RedirectMiddleware and noticed that code 308 is missing. Is there a reason for that?\r\nSome websites don't update their sitemap and have a long list of 308 from http to https.\r\n\r\n(side note: is there a way to add \"s\" before a link is scraped?)\n", "before_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 
303, 307)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}], "after_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in 
getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}]} | 1,400 | 251 |
gh_patches_debug_49170 | rasdani/github-patches | git_diff | mkdocs__mkdocs-398 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
jinja2.exceptions.UndefinedError: 'meta' is undefined
**jinja2.exceptions.UndefinedError: 'meta' is undefined**
Copied the `bootstrap` theme to a project folder named `mybootstrap`.
The only `.yml` entries:
```
site_name: The website
theme_dir: 'mybootstrap'
```
ERROR
```
$ mkdocs serve
Traceback (most recent call last):
File "/usr/bin/mkdocs", line 9, in <module>
load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')()
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/main.py", line 74, in run_main
main(cmd, args=sys.argv[2:], options=dict(opts))
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/main.py", line 46, in main
serve(config, options=options)
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/serve.py", line 85, in serve
build(config, live_server=True)
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py", line 223, in build
build_pages(config)
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py", line 155, in build_pages
build_404(config, env, site_navigation)
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py", line 142, in build_404
output_content = template.render(global_context)
File "/usr/lib/python3.4/site-packages/jinja2/environment.py", line 969, in render
return self.environment.handle_exception(exc_info, True)
File "/usr/lib/python3.4/site-packages/jinja2/environment.py", line 742, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python3.4/site-packages/jinja2/_compat.py", line 36, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/themes/mkdocs/404.html", line 1, in <module>
{% extends "base.html" %}
File "mybootstrap/base.html", line 48, in <module>
<div class="col-md-9" role="main">{% include "content.html" %}</div>
File "mybootstrap/content.html", line 1, in <module>
{% if meta.source %}
File "/usr/lib/python3.4/site-packages/jinja2/environment.py", line 397, in getattr
return getattr(obj, attribute)
jinja2.exceptions.UndefinedError: 'meta' is undefined
```
Patch: https://github.com/tomchristie/mkdocs/pull/244 seems not to help for this case
--- END ISSUE ---
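The traceback comes from the 404 page, which `build_404` renders with only the global context, so a custom theme template that dereferences `meta.source` unguarded fails; the same attributes can also be missing on the Markdown side when a source file is completely blank. A minimal reproduction sketch with a defensive fallback, assuming the Python-Markdown package and its bundled `meta`/`toc` extensions behave as they did around this release:

```python
import markdown

md = markdown.Markdown(extensions=['meta', 'toc'])
md.convert('')   # blank source: convert() bails out early, so the extensions never run

print(hasattr(md, 'Meta'))   # typically False - the meta preprocessor never set it
print(hasattr(md, 'toc'))    # typically False - likewise for the toc treeprocessor

# Defensive access avoids relying on attributes that may not exist:
meta = getattr(md, 'Meta', {})
toc_html = getattr(md, 'toc', '')
```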
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/build.py`
Content:
```
1 # coding: utf-8
2 from __future__ import print_function
3
4 from datetime import datetime
5
6 from jinja2.exceptions import TemplateNotFound
7 import mkdocs
8 from mkdocs import nav, toc, utils
9 from mkdocs.compat import urljoin, PY2
10 from mkdocs.relative_path_ext import RelativePathExtension
11 import jinja2
12 import json
13 import markdown
14 import os
15 import logging
16
17 log = logging.getLogger('mkdocs')
18
19
20 def convert_markdown(markdown_source, site_navigation=None, extensions=(), strict=False):
21 """
22 Convert the Markdown source file to HTML content, and additionally
23 return the parsed table of contents, and a dictionary of any metadata
24 that was specified in the Markdown file.
25
26 `extensions` is an optional sequence of Python Markdown extensions to add
27 to the default set.
28 """
29
30 # Generate the HTML from the markdown source
31 builtin_extensions = ['meta', 'toc', 'tables', 'fenced_code']
32 mkdocs_extensions = [RelativePathExtension(site_navigation, strict), ]
33 extensions = builtin_extensions + mkdocs_extensions + list(extensions)
34 md = markdown.Markdown(
35 extensions=extensions
36 )
37 html_content = md.convert(markdown_source)
38 meta = md.Meta
39 toc_html = md.toc
40
41 # Post process the generated table of contents into a data structure
42 table_of_contents = toc.TableOfContents(toc_html)
43
44 return (html_content, table_of_contents, meta)
45
46
47 def get_global_context(nav, config):
48 """
49 Given the SiteNavigation and config, generate the context which is relevant
50 to app pages.
51 """
52
53 site_name = config['site_name']
54
55 if config['site_favicon']:
56 site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])
57 else:
58 site_favicon = None
59
60 page_description = config['site_description']
61
62 extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])
63
64 extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])
65
66 return {
67 'site_name': site_name,
68 'site_author': config['site_author'],
69 'favicon': site_favicon,
70 'page_description': page_description,
71
72 # Note that there's intentionally repetition here. Rather than simply
73 # provide the config dictionary we instead pass everything explicitly.
74 #
75 # This helps ensure that we can throughly document the context that
76 # gets passed to themes.
77 'repo_url': config['repo_url'],
78 'repo_name': config['repo_name'],
79 'nav': nav,
80 'base_url': nav.url_context.make_relative('/'),
81 'homepage_url': nav.homepage.url,
82
83 'extra_css': extra_css,
84 'extra_javascript': extra_javascript,
85
86 'include_nav': config['include_nav'],
87 'include_next_prev': config['include_next_prev'],
88 'include_search': config['include_search'],
89
90 'copyright': config['copyright'],
91 'google_analytics': config['google_analytics'],
92
93 'mkdocs_version': mkdocs.__version__,
94 'build_date_utc': datetime.utcnow()
95 }
96
97
98 def get_page_context(page, content, nav, toc, meta, config):
99 """
100 Generate the page context by extending the global context and adding page
101 specific variables.
102 """
103
104 if page.is_homepage or page.title is None:
105 page_title = None
106 else:
107 page_title = page.title
108
109 if page.is_homepage:
110 page_description = config['site_description']
111 else:
112 page_description = None
113
114 if config['site_url']:
115 base = config['site_url']
116 if not base.endswith('/'):
117 base += '/'
118 canonical_url = urljoin(base, page.abs_url.lstrip('/'))
119 else:
120 canonical_url = None
121
122 return {
123 'page_title': page_title,
124 'page_description': page_description,
125
126 'content': content,
127 'toc': toc,
128 'meta': meta,
129
130
131 'canonical_url': canonical_url,
132
133 'current_page': page,
134 'previous_page': page.previous_page,
135 'next_page': page.next_page,
136 }
137
138
139 def build_404(config, env, site_navigation):
140
141 try:
142 template = env.get_template('404.html')
143 except TemplateNotFound:
144 return
145
146 global_context = get_global_context(site_navigation, config)
147
148 output_content = template.render(global_context)
149 output_path = os.path.join(config['site_dir'], '404.html')
150 utils.write_file(output_content.encode('utf-8'), output_path)
151
152
153 def build_pages(config, dump_json=False):
154 """
155 Builds all the pages and writes them into the build directory.
156 """
157 site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])
158 loader = jinja2.FileSystemLoader(config['theme_dir'])
159 env = jinja2.Environment(loader=loader)
160
161 build_404(config, env, site_navigation)
162
163 for page in site_navigation.walk_pages():
164 # Read the input file
165 input_path = os.path.join(config['docs_dir'], page.input_path)
166
167 try:
168 input_content = open(input_path, 'r').read()
169 except IOError:
170 log.error('file not found: %s' % input_path)
171 continue
172
173 if PY2:
174 input_content = input_content.decode('utf-8')
175
176 # Process the markdown text
177 html_content, table_of_contents, meta = convert_markdown(
178 input_content, site_navigation,
179 extensions=config['markdown_extensions'], strict=config['strict']
180 )
181
182 context = get_global_context(site_navigation, config)
183 context.update(get_page_context(
184 page, html_content, site_navigation,
185 table_of_contents, meta, config
186 ))
187
188 # Allow 'template:' override in md source files.
189 if 'template' in meta:
190 template = env.get_template(meta['template'][0])
191 else:
192 template = env.get_template('base.html')
193
194 # Render the template.
195 output_content = template.render(context)
196
197 # Write the output file.
198 output_path = os.path.join(config['site_dir'], page.output_path)
199 if dump_json:
200 json_context = {
201 'content': context['content'],
202 'title': context['current_page'].title,
203 'url': context['current_page'].abs_url,
204 'language': 'en',
205 }
206 utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json'))
207 else:
208 utils.write_file(output_content.encode('utf-8'), output_path)
209
210
211 def build(config, live_server=False, dump_json=False, clean_site_dir=False):
212 """
213 Perform a full site build.
214 """
215 if clean_site_dir:
216 print("Cleaning site directory")
217 utils.clean_directory(config['site_dir'])
218 if not live_server:
219 print("Building documentation to directory: %s" % config['site_dir'])
220 if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):
221 print("Directory %s contains stale files. Use --clean to remove them." % config['site_dir'])
222
223 if dump_json:
224 build_pages(config, dump_json=True)
225 else:
226 # Reversed as we want to take the media files from the builtin theme
227 # and then from the custom theme_dir so the custom versions take take
228 # precedence.
229 for theme_dir in reversed(config['theme_dir']):
230 utils.copy_media_files(theme_dir, config['site_dir'])
231 utils.copy_media_files(config['docs_dir'], config['site_dir'])
232 build_pages(config)
233
234
235 def site_directory_contains_stale_files(site_directory):
236 """
237 Check if the site directory contains stale files from a previous build.
238 Right now the check returns true if the directory is not empty.
239 A more sophisticated approach should be found to trigger only if there are
240 files that won't be overwritten anyway.
241 """
242 if os.path.exists(site_directory):
243 if os.listdir(site_directory):
244 return True
245 return False
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/build.py b/mkdocs/build.py
--- a/mkdocs/build.py
+++ b/mkdocs/build.py
@@ -35,8 +35,11 @@
extensions=extensions
)
html_content = md.convert(markdown_source)
- meta = md.Meta
- toc_html = md.toc
+
+ # On completely blank markdown files, no Meta or tox properties are added
+ # to the generated document.
+ meta = getattr(md, 'Meta', {})
+ toc_html = getattr(md, 'toc', '')
# Post process the generated table of contents into a data structure
table_of_contents = toc.TableOfContents(toc_html)
| {"golden_diff": "diff --git a/mkdocs/build.py b/mkdocs/build.py\n--- a/mkdocs/build.py\n+++ b/mkdocs/build.py\n@@ -35,8 +35,11 @@\n extensions=extensions\n )\n html_content = md.convert(markdown_source)\n- meta = md.Meta\n- toc_html = md.toc\n+\n+ # On completely blank markdown files, no Meta or tox properties are added\n+ # to the generated document.\n+ meta = getattr(md, 'Meta', {})\n+ toc_html = getattr(md, 'toc', '')\n \n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n", "issue": "jinja2.exceptions.UndefinedError: 'meta' is undefined\n**jinja2.exceptions.UndefinedError: 'meta' is undefined**\n\nCopying themes `bootstrap` to project folder named: `mybootstrap`\n\nonly .yml entry\n\n```\nsite_name: The website\ntheme_dir: 'mybootstrap'\n```\n\nERROR\n\n```\n$ mkdocs serve\nTraceback (most recent call last):\n File \"/usr/bin/mkdocs\", line 9, in <module>\n load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')()\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/main.py\", line 74, in run_main\n main(cmd, args=sys.argv[2:], options=dict(opts))\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/main.py\", line 46, in main\n serve(config, options=options)\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/serve.py\", line 85, in serve\n build(config, live_server=True)\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py\", line 223, in build\n build_pages(config)\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py\", line 155, in build_pages\n build_404(config, env, site_navigation)\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/build.py\", line 142, in build_404\n output_content = template.render(global_context)\n File \"/usr/lib/python3.4/site-packages/jinja2/environment.py\", line 969, in render\n return self.environment.handle_exception(exc_info, True)\n File \"/usr/lib/python3.4/site-packages/jinja2/environment.py\", line 742, in handle_exception\n reraise(exc_type, exc_value, tb)\n File \"/usr/lib/python3.4/site-packages/jinja2/_compat.py\", line 36, in reraise\n raise value.with_traceback(tb)\n File \"/usr/lib/python3.4/site-packages/mkdocs-0.11.1-py3.4.egg/mkdocs/themes/mkdocs/404.html\", line 1, in <module>\n {% extends \"base.html\" %}\n File \"mybootstrap/base.html\", line 48, in <module>\n <div class=\"col-md-9\" role=\"main\">{% include \"content.html\" %}</div>\n File \"mybootstrap/content.html\", line 1, in <module>\n {% if meta.source %}\n File \"/usr/lib/python3.4/site-packages/jinja2/environment.py\", line 397, in getattr\n return getattr(obj, attribute)\njinja2.exceptions.UndefinedError: 'meta' is undefined\n\n```\n\nPatch: https://github.com/tomchristie/mkdocs/pull/244 seems not to help for this case\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom datetime import datetime\n\nfrom jinja2.exceptions import TemplateNotFound\nimport mkdocs\nfrom mkdocs import nav, toc, utils\nfrom mkdocs.compat import urljoin, PY2\nfrom mkdocs.relative_path_ext import RelativePathExtension\nimport jinja2\nimport json\nimport markdown\nimport os\nimport logging\n\nlog = logging.getLogger('mkdocs')\n\n\ndef convert_markdown(markdown_source, site_navigation=None, extensions=(), strict=False):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and 
a dictionary of any metadata\n that was specified in the Markdown file.\n\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n\n # Generate the HTML from the markdown source\n builtin_extensions = ['meta', 'toc', 'tables', 'fenced_code']\n mkdocs_extensions = [RelativePathExtension(site_navigation, strict), ]\n extensions = builtin_extensions + mkdocs_extensions + list(extensions)\n md = markdown.Markdown(\n extensions=extensions\n )\n html_content = md.convert(markdown_source)\n meta = md.Meta\n toc_html = md.toc\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef get_global_context(nav, config):\n \"\"\"\n Given the SiteNavigation and config, generate the context which is relevant\n to app pages.\n \"\"\"\n\n site_name = config['site_name']\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n page_description = config['site_description']\n\n extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n 'page_description': page_description,\n\n # Note that there's intentionally repetition here. Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n 'nav': nav,\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n 'include_search': config['include_search'],\n\n 'copyright': config['copyright'],\n 'google_analytics': config['google_analytics'],\n\n 'mkdocs_version': mkdocs.__version__,\n 'build_date_utc': datetime.utcnow()\n }\n\n\ndef get_page_context(page, content, nav, toc, meta, config):\n \"\"\"\n Generate the page context by extending the global context and adding page\n specific variables.\n \"\"\"\n\n if page.is_homepage or page.title is None:\n page_title = None\n else:\n page_title = page.title\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n if config['site_url']:\n base = config['site_url']\n if not base.endswith('/'):\n base += '/'\n canonical_url = urljoin(base, page.abs_url.lstrip('/'))\n else:\n canonical_url = None\n\n return {\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'meta': meta,\n\n\n 'canonical_url': canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page,\n }\n\n\ndef build_404(config, env, site_navigation):\n\n try:\n template = env.get_template('404.html')\n except TemplateNotFound:\n return\n\n global_context = get_global_context(site_navigation, config)\n\n output_content = template.render(global_context)\n output_path = os.path.join(config['site_dir'], '404.html')\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages 
and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'])\n env = jinja2.Environment(loader=loader)\n\n build_404(config, env, site_navigation)\n\n for page in site_navigation.walk_pages():\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n\n try:\n input_content = open(input_path, 'r').read()\n except IOError:\n log.error('file not found: %s' % input_path)\n continue\n\n if PY2:\n input_content = input_content.decode('utf-8')\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n input_content, site_navigation,\n extensions=config['markdown_extensions'], strict=config['strict']\n )\n\n context = get_global_context(site_navigation, config)\n context.update(get_page_context(\n page, html_content, site_navigation,\n table_of_contents, meta, config\n ))\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n template = env.get_template('base.html')\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if clean_site_dir:\n print(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n print(\"Building documentation to directory: %s\" % config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n print(\"Directory %s contains stale files. 
Use --clean to remove them.\" % config['site_dir'])\n\n if dump_json:\n build_pages(config, dump_json=True)\n else:\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so the custom versions take take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n utils.copy_media_files(theme_dir, config['site_dir'])\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/build.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom datetime import datetime\n\nfrom jinja2.exceptions import TemplateNotFound\nimport mkdocs\nfrom mkdocs import nav, toc, utils\nfrom mkdocs.compat import urljoin, PY2\nfrom mkdocs.relative_path_ext import RelativePathExtension\nimport jinja2\nimport json\nimport markdown\nimport os\nimport logging\n\nlog = logging.getLogger('mkdocs')\n\n\ndef convert_markdown(markdown_source, site_navigation=None, extensions=(), strict=False):\n \"\"\"\n Convert the Markdown source file to HTML content, and additionally\n return the parsed table of contents, and a dictionary of any metadata\n that was specified in the Markdown file.\n\n `extensions` is an optional sequence of Python Markdown extensions to add\n to the default set.\n \"\"\"\n\n # Generate the HTML from the markdown source\n builtin_extensions = ['meta', 'toc', 'tables', 'fenced_code']\n mkdocs_extensions = [RelativePathExtension(site_navigation, strict), ]\n extensions = builtin_extensions + mkdocs_extensions + list(extensions)\n md = markdown.Markdown(\n extensions=extensions\n )\n html_content = md.convert(markdown_source)\n\n # On completely blank markdown files, no Meta or tox properties are added\n # to the generated document.\n meta = getattr(md, 'Meta', {})\n toc_html = getattr(md, 'toc', '')\n\n # Post process the generated table of contents into a data structure\n table_of_contents = toc.TableOfContents(toc_html)\n\n return (html_content, table_of_contents, meta)\n\n\ndef get_global_context(nav, config):\n \"\"\"\n Given the SiteNavigation and config, generate the context which is relevant\n to app pages.\n \"\"\"\n\n site_name = config['site_name']\n\n if config['site_favicon']:\n site_favicon = nav.url_context.make_relative('/' + config['site_favicon'])\n else:\n site_favicon = None\n\n page_description = config['site_description']\n\n extra_javascript = utils.create_media_urls(nav=nav, url_list=config['extra_javascript'])\n\n extra_css = utils.create_media_urls(nav=nav, url_list=config['extra_css'])\n\n return {\n 'site_name': site_name,\n 'site_author': config['site_author'],\n 'favicon': site_favicon,\n 'page_description': page_description,\n\n # Note that there's intentionally repetition here. 
Rather than simply\n # provide the config dictionary we instead pass everything explicitly.\n #\n # This helps ensure that we can throughly document the context that\n # gets passed to themes.\n 'repo_url': config['repo_url'],\n 'repo_name': config['repo_name'],\n 'nav': nav,\n 'base_url': nav.url_context.make_relative('/'),\n 'homepage_url': nav.homepage.url,\n\n 'extra_css': extra_css,\n 'extra_javascript': extra_javascript,\n\n 'include_nav': config['include_nav'],\n 'include_next_prev': config['include_next_prev'],\n 'include_search': config['include_search'],\n\n 'copyright': config['copyright'],\n 'google_analytics': config['google_analytics'],\n\n 'mkdocs_version': mkdocs.__version__,\n 'build_date_utc': datetime.utcnow()\n }\n\n\ndef get_page_context(page, content, nav, toc, meta, config):\n \"\"\"\n Generate the page context by extending the global context and adding page\n specific variables.\n \"\"\"\n\n if page.is_homepage or page.title is None:\n page_title = None\n else:\n page_title = page.title\n\n if page.is_homepage:\n page_description = config['site_description']\n else:\n page_description = None\n\n if config['site_url']:\n base = config['site_url']\n if not base.endswith('/'):\n base += '/'\n canonical_url = urljoin(base, page.abs_url.lstrip('/'))\n else:\n canonical_url = None\n\n return {\n 'page_title': page_title,\n 'page_description': page_description,\n\n 'content': content,\n 'toc': toc,\n 'meta': meta,\n\n\n 'canonical_url': canonical_url,\n\n 'current_page': page,\n 'previous_page': page.previous_page,\n 'next_page': page.next_page,\n }\n\n\ndef build_404(config, env, site_navigation):\n\n try:\n template = env.get_template('404.html')\n except TemplateNotFound:\n return\n\n global_context = get_global_context(site_navigation, config)\n\n output_content = template.render(global_context)\n output_path = os.path.join(config['site_dir'], '404.html')\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build_pages(config, dump_json=False):\n \"\"\"\n Builds all the pages and writes them into the build directory.\n \"\"\"\n site_navigation = nav.SiteNavigation(config['pages'], config['use_directory_urls'])\n loader = jinja2.FileSystemLoader(config['theme_dir'])\n env = jinja2.Environment(loader=loader)\n\n build_404(config, env, site_navigation)\n\n for page in site_navigation.walk_pages():\n # Read the input file\n input_path = os.path.join(config['docs_dir'], page.input_path)\n\n try:\n input_content = open(input_path, 'r').read()\n except IOError:\n log.error('file not found: %s' % input_path)\n continue\n\n if PY2:\n input_content = input_content.decode('utf-8')\n\n # Process the markdown text\n html_content, table_of_contents, meta = convert_markdown(\n input_content, site_navigation,\n extensions=config['markdown_extensions'], strict=config['strict']\n )\n\n context = get_global_context(site_navigation, config)\n context.update(get_page_context(\n page, html_content, site_navigation,\n table_of_contents, meta, config\n ))\n\n # Allow 'template:' override in md source files.\n if 'template' in meta:\n template = env.get_template(meta['template'][0])\n else:\n template = env.get_template('base.html')\n\n # Render the template.\n output_content = template.render(context)\n\n # Write the output file.\n output_path = os.path.join(config['site_dir'], page.output_path)\n if dump_json:\n json_context = {\n 'content': context['content'],\n 'title': context['current_page'].title,\n 'url': context['current_page'].abs_url,\n 'language': 'en',\n }\n 
utils.write_file(json.dumps(json_context, indent=4).encode('utf-8'), output_path.replace('.html', '.json'))\n else:\n utils.write_file(output_content.encode('utf-8'), output_path)\n\n\ndef build(config, live_server=False, dump_json=False, clean_site_dir=False):\n \"\"\"\n Perform a full site build.\n \"\"\"\n if clean_site_dir:\n print(\"Cleaning site directory\")\n utils.clean_directory(config['site_dir'])\n if not live_server:\n print(\"Building documentation to directory: %s\" % config['site_dir'])\n if not clean_site_dir and site_directory_contains_stale_files(config['site_dir']):\n print(\"Directory %s contains stale files. Use --clean to remove them.\" % config['site_dir'])\n\n if dump_json:\n build_pages(config, dump_json=True)\n else:\n # Reversed as we want to take the media files from the builtin theme\n # and then from the custom theme_dir so the custom versions take take\n # precedence.\n for theme_dir in reversed(config['theme_dir']):\n utils.copy_media_files(theme_dir, config['site_dir'])\n utils.copy_media_files(config['docs_dir'], config['site_dir'])\n build_pages(config)\n\n\ndef site_directory_contains_stale_files(site_directory):\n \"\"\"\n Check if the site directory contains stale files from a previous build.\n Right now the check returns true if the directory is not empty.\n A more sophisticated approach should be found to trigger only if there are\n files that won't be overwritten anyway.\n \"\"\"\n if os.path.exists(site_directory):\n if os.listdir(site_directory):\n return True\n return False\n", "path": "mkdocs/build.py"}]} | 3,381 | 155 |
gh_patches_debug_10728 | rasdani/github-patches | git_diff | celery__kombu-790 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No proxy support using SQS
The async HTTP code does not allow the proxy environment variables to be picked up by libcurl.
It seems to be due to this code in `async/http/curl.py`:
```
if request.proxy_host:
    if not request.proxy_port:
        raise ValueError('Request with proxy_host but no proxy_port')
    setopt(_pycurl.PROXY, request.proxy_host)
    setopt(_pycurl.PROXYPORT, request.proxy_port)
    if request.proxy_username:
        setopt(_pycurl.PROXYUSERPWD, '{0}:{1}'.format(
            request.proxy_username, request.proxy_password or ''))
else:
    setopt(_pycurl.PROXY, '')
    curl.unsetopt(_pycurl.PROXYUSERPWD)
```
Notice that if the original request does not specify a `proxy_host`, the code explicitly sets the proxy to an empty string in the curl options, which also stops libcurl from reading the proxy from the environment.
The caller in this case is the SQS transport, which does not pass in any proxy settings either, so when using SQS there doesn't appear to be a way to make Kombu use a proxy for the async HTTP requests.
(Note: The calls through Boto DO work because Boto reads it from the environment variables by default)
--- END ISSUE ---
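One possible direction for a fix, sketched here only for illustration (the helper name and signature are made up, and this is not Kombu's actual patch), is to fall back to the standard `*_proxy` environment variables when the request carries no explicit proxy, instead of force-clearing pycurl's `PROXY` option:

```python
from urllib.parse import urlsplit
from urllib.request import getproxies

import pycurl


def apply_proxy_options(setopt, curl, request):
    """Set pycurl proxy options, honouring http_proxy/https_proxy as a fallback."""
    if request.proxy_host:
        if not request.proxy_port:
            raise ValueError('Request with proxy_host but no proxy_port')
        setopt(pycurl.PROXY, request.proxy_host)
        setopt(pycurl.PROXYPORT, request.proxy_port)
        if request.proxy_username:
            setopt(pycurl.PROXYUSERPWD, '{0}:{1}'.format(
                request.proxy_username, request.proxy_password or ''))
        return

    # No explicit proxy on the request: consult the environment instead.
    proxy_url = getproxies().get(urlsplit(request.url).scheme)
    if proxy_url:
        parsed = urlsplit(proxy_url)
        setopt(pycurl.PROXY, parsed.hostname)
        if parsed.port:
            setopt(pycurl.PROXYPORT, parsed.port)
        if parsed.username:
            setopt(pycurl.PROXYUSERPWD, '{0}:{1}'.format(
                parsed.username, parsed.password or ''))
    else:
        # Simply not touching PROXY here would also let libcurl read the
        # environment itself; clearing it, as the current code does, disables that.
        setopt(pycurl.PROXY, '')
        curl.unsetopt(pycurl.PROXYUSERPWD)
```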
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kombu/asynchronous/http/curl.py`
Content:
```
1 """HTTP Client using pyCurl."""
2
3 from collections import deque
4 from functools import partial
5 from io import BytesIO
6 from time import time
7
8 from kombu.asynchronous.hub import READ, WRITE, get_event_loop
9 from kombu.exceptions import HttpError
10 from kombu.utils.encoding import bytes_to_str
11
12 from .base import BaseClient
13
14 try:
15 import pycurl
16 except ImportError: # pragma: no cover
17 pycurl = Curl = METH_TO_CURL = None
18 else:
19 from pycurl import Curl
20
21 METH_TO_CURL = {
22 'GET': pycurl.HTTPGET,
23 'POST': pycurl.POST,
24 'PUT': pycurl.UPLOAD,
25 'HEAD': pycurl.NOBODY,
26 }
27
28 __all__ = ('CurlClient',)
29
30 DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'
31 EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])
32
33
34 class CurlClient(BaseClient):
35 """Curl HTTP Client."""
36
37 Curl = Curl
38
39 def __init__(self, hub=None, max_clients=10):
40 if pycurl is None:
41 raise ImportError('The curl client requires the pycurl library.')
42 hub = hub or get_event_loop()
43 super().__init__(hub)
44 self.max_clients = max_clients
45
46 self._multi = pycurl.CurlMulti()
47 self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
48 self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
49 self._curls = [self.Curl() for i in range(max_clients)]
50 self._free_list = self._curls[:]
51 self._pending = deque()
52 self._fds = {}
53
54 self._socket_action = self._multi.socket_action
55 self._timeout_check_tref = self.hub.call_repeatedly(
56 1.0, self._timeout_check,
57 )
58
59 # pycurl 7.29.0 workaround
60 dummy_curl_handle = pycurl.Curl()
61 self._multi.add_handle(dummy_curl_handle)
62 self._multi.remove_handle(dummy_curl_handle)
63
64 def close(self):
65 self._timeout_check_tref.cancel()
66 for _curl in self._curls:
67 _curl.close()
68 self._multi.close()
69
70 def add_request(self, request):
71 self._pending.append(request)
72 self._process_queue()
73 self._set_timeout(0)
74 return request
75
76 # the next two methods are used for linux/epoll workaround:
77 # we temporarily remove all curl fds from hub, so curl cannot
78 # close a fd which is still inside epoll
79 def _pop_from_hub(self):
80 for fd in self._fds:
81 self.hub.remove(fd)
82
83 def _push_to_hub(self):
84 for fd, events in self._fds.items():
85 if events & READ:
86 self.hub.add_reader(fd, self.on_readable, fd)
87 if events & WRITE:
88 self.hub.add_writer(fd, self.on_writable, fd)
89
90 def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):
91 if event == _pycurl.POLL_REMOVE:
92 if fd in self._fds:
93 self._fds.pop(fd, None)
94 else:
95 if event == _pycurl.POLL_IN:
96 self._fds[fd] = READ
97 elif event == _pycurl.POLL_OUT:
98 self._fds[fd] = WRITE
99 elif event == _pycurl.POLL_INOUT:
100 self._fds[fd] = READ | WRITE
101
102 def _set_timeout(self, msecs):
103 self.hub.call_later(msecs, self._timeout_check)
104
105 def _timeout_check(self, _pycurl=pycurl):
106 self._pop_from_hub()
107 try:
108 while 1:
109 try:
110 ret, _ = self._multi.socket_all()
111 except pycurl.error as exc:
112 ret = exc.args[0]
113 if ret != _pycurl.E_CALL_MULTI_PERFORM:
114 break
115 finally:
116 self._push_to_hub()
117 self._process_pending_requests()
118
119 def on_readable(self, fd, _pycurl=pycurl):
120 return self._on_event(fd, _pycurl.CSELECT_IN)
121
122 def on_writable(self, fd, _pycurl=pycurl):
123 return self._on_event(fd, _pycurl.CSELECT_OUT)
124
125 def _on_event(self, fd, event, _pycurl=pycurl):
126 self._pop_from_hub()
127 try:
128 while 1:
129 try:
130 ret, _ = self._socket_action(fd, event)
131 except pycurl.error as exc:
132 ret = exc.args[0]
133 if ret != _pycurl.E_CALL_MULTI_PERFORM:
134 break
135 finally:
136 self._push_to_hub()
137 self._process_pending_requests()
138
139 def _process_pending_requests(self):
140 while 1:
141 q, succeeded, failed = self._multi.info_read()
142 for curl in succeeded:
143 self._process(curl)
144 for curl, errno, reason in failed:
145 self._process(curl, errno, reason)
146 if q == 0:
147 break
148 self._process_queue()
149
150 def _process_queue(self):
151 while 1:
152 started = 0
153 while self._free_list and self._pending:
154 started += 1
155 curl = self._free_list.pop()
156 request = self._pending.popleft()
157 headers = self.Headers()
158 buf = BytesIO()
159 curl.info = {
160 'headers': headers,
161 'buffer': buf,
162 'request': request,
163 'curl_start_time': time(),
164 }
165 self._setup_request(curl, request, buf, headers)
166 self._multi.add_handle(curl)
167 if not started:
168 break
169
170 def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):
171 info, curl.info = curl.info, None
172 self._multi.remove_handle(curl)
173 self._free_list.append(curl)
174 buffer = info['buffer']
175 if errno:
176 code = 599
177 error = HttpError(code, reason)
178 error.errno = errno
179 effective_url = None
180 buffer.close()
181 buffer = None
182 else:
183 error = None
184 code = curl.getinfo(_pycurl.HTTP_CODE)
185 effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)
186 buffer.seek(0)
187 # try:
188 request = info['request']
189 request.on_ready(self.Response(
190 request=request, code=code, headers=info['headers'],
191 buffer=buffer, effective_url=effective_url, error=error,
192 ))
193
194 def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):
195 setopt = curl.setopt
196 setopt(_pycurl.URL, bytes_to_str(request.url))
197
198 # see tornado curl client
199 request.headers.setdefault('Expect', '')
200 request.headers.setdefault('Pragma', '')
201
202 setopt(
203 _pycurl.HTTPHEADER,
204 ['{}: {}'.format(*h) for h in request.headers.items()],
205 )
206
207 setopt(
208 _pycurl.HEADERFUNCTION,
209 partial(request.on_header or self.on_header, request.headers),
210 )
211 setopt(
212 _pycurl.WRITEFUNCTION, request.on_stream or buffer.write,
213 )
214 setopt(
215 _pycurl.FOLLOWLOCATION, request.follow_redirects,
216 )
217 setopt(
218 _pycurl.USERAGENT,
219 bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),
220 )
221 if request.network_interface:
222 setopt(_pycurl.INTERFACE, request.network_interface)
223 setopt(
224 _pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',
225 )
226 if request.proxy_host:
227 if not request.proxy_port:
228 raise ValueError('Request with proxy_host but no proxy_port')
229 setopt(_pycurl.PROXY, request.proxy_host)
230 setopt(_pycurl.PROXYPORT, request.proxy_port)
231 if request.proxy_username:
232 setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(
233 request.proxy_username, request.proxy_password or ''))
234 else:
235 setopt(_pycurl.PROXY, '')
236 curl.unsetopt(_pycurl.PROXYUSERPWD)
237
238 setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)
239 setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)
240 if request.ca_certs is not None:
241 setopt(_pycurl.CAINFO, request.ca_certs)
242
243 setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
244
245 for meth in METH_TO_CURL.values():
246 setopt(meth, False)
247 try:
248 meth = METH_TO_CURL[request.method]
249 except KeyError:
250 curl.setopt(_pycurl.CUSTOMREQUEST, request.method)
251 else:
252 curl.unsetopt(_pycurl.CUSTOMREQUEST)
253 setopt(meth, True)
254
255 if request.method in ('POST', 'PUT'):
256 body = request.body.encode('utf-8') if request.body else bytes()
257 reqbuffer = BytesIO(body)
258 setopt(_pycurl.READFUNCTION, reqbuffer.read)
259 if request.method == 'POST':
260
261 def ioctl(cmd):
262 if cmd == _pycurl.IOCMD_RESTARTREAD:
263 reqbuffer.seek(0)
264 setopt(_pycurl.IOCTLFUNCTION, ioctl)
265 setopt(_pycurl.POSTFIELDSIZE, len(body))
266 else:
267 setopt(_pycurl.INFILESIZE, len(body))
268 elif request.method == 'GET':
269 assert not request.body
270
271 if request.auth_username is not None:
272 auth_mode = {
273 'basic': _pycurl.HTTPAUTH_BASIC,
274 'digest': _pycurl.HTTPAUTH_DIGEST
275 }[request.auth_mode or 'basic']
276 setopt(_pycurl.HTTPAUTH, auth_mode)
277 userpwd = '{}:{}'.format(
278 request.auth_username, request.auth_password or '',
279 )
280 setopt(_pycurl.USERPWD, userpwd)
281 else:
282 curl.unsetopt(_pycurl.USERPWD)
283
284 if request.client_cert is not None:
285 setopt(_pycurl.SSLCERT, request.client_cert)
286 if request.client_key is not None:
287 setopt(_pycurl.SSLKEY, request.client_key)
288
289 if request.on_prepare is not None:
290 request.on_prepare(curl)
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kombu/asynchronous/http/curl.py b/kombu/asynchronous/http/curl.py
--- a/kombu/asynchronous/http/curl.py
+++ b/kombu/asynchronous/http/curl.py
@@ -231,9 +231,6 @@
if request.proxy_username:
setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(
request.proxy_username, request.proxy_password or ''))
- else:
- setopt(_pycurl.PROXY, '')
- curl.unsetopt(_pycurl.PROXYUSERPWD)
setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)
setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)
| {"golden_diff": "diff --git a/kombu/asynchronous/http/curl.py b/kombu/asynchronous/http/curl.py\n--- a/kombu/asynchronous/http/curl.py\n+++ b/kombu/asynchronous/http/curl.py\n@@ -231,9 +231,6 @@\n if request.proxy_username:\n setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(\n request.proxy_username, request.proxy_password or ''))\n- else:\n- setopt(_pycurl.PROXY, '')\n- curl.unsetopt(_pycurl.PROXYUSERPWD)\n \n setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)\n setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)\n", "issue": "No proxy support using SQS\nThe async HTTP code is not allowing the proxy environment variables to be used by libcurl\r\n\r\nIt seems to be due to this code in `async/http/curl.py`\r\n\r\n```\r\n if request.proxy_host:\r\n if not request.proxy_port:\r\n raise ValueError('Request with proxy_host but no proxy_port')\r\n setopt(_pycurl.PROXY, request.proxy_host)\r\n setopt(_pycurl.PROXYPORT, request.proxy_port)\r\n if request.proxy_username:\r\n setopt(_pycurl.PROXYUSERPWD, '{0}:{1}'.format(\r\n request.proxy_username, request.proxy_password or ''))\r\n else:\r\n setopt(_pycurl.PROXY, '')\r\n curl.unsetopt(_pycurl.PROXYUSERPWD)\r\n```\r\nNotice that if the original request does not specify a `proxy_host` then it actually sets it to empty in the curl OPTs which makes libcurl NOT read the environment for it either.\r\n\r\nAnd the caller in this case is the SQS transport which does NOT pass any proxy setting in either so when using SQS there doesn't appear to be a way to make Kombu use a proxy for the async HTTP. \r\n\r\n(Note: The calls through Boto DO work because Boto reads it from the environment variables by default)\n", "before_files": [{"content": "\"\"\"HTTP Client using pyCurl.\"\"\"\n\nfrom collections import deque\nfrom functools import partial\nfrom io import BytesIO\nfrom time import time\n\nfrom kombu.asynchronous.hub import READ, WRITE, get_event_loop\nfrom kombu.exceptions import HttpError\nfrom kombu.utils.encoding import bytes_to_str\n\nfrom .base import BaseClient\n\ntry:\n import pycurl\nexcept ImportError: # pragma: no cover\n pycurl = Curl = METH_TO_CURL = None\nelse:\n from pycurl import Curl\n\n METH_TO_CURL = {\n 'GET': pycurl.HTTPGET,\n 'POST': pycurl.POST,\n 'PUT': pycurl.UPLOAD,\n 'HEAD': pycurl.NOBODY,\n }\n\n__all__ = ('CurlClient',)\n\nDEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'\nEXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])\n\n\nclass CurlClient(BaseClient):\n \"\"\"Curl HTTP Client.\"\"\"\n\n Curl = Curl\n\n def __init__(self, hub=None, max_clients=10):\n if pycurl is None:\n raise ImportError('The curl client requires the pycurl library.')\n hub = hub or get_event_loop()\n super().__init__(hub)\n self.max_clients = max_clients\n\n self._multi = pycurl.CurlMulti()\n self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)\n self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)\n self._curls = [self.Curl() for i in range(max_clients)]\n self._free_list = self._curls[:]\n self._pending = deque()\n self._fds = {}\n\n self._socket_action = self._multi.socket_action\n self._timeout_check_tref = self.hub.call_repeatedly(\n 1.0, self._timeout_check,\n )\n\n # pycurl 7.29.0 workaround\n dummy_curl_handle = pycurl.Curl()\n self._multi.add_handle(dummy_curl_handle)\n self._multi.remove_handle(dummy_curl_handle)\n\n def close(self):\n self._timeout_check_tref.cancel()\n for _curl in self._curls:\n _curl.close()\n self._multi.close()\n\n def add_request(self, request):\n 
self._pending.append(request)\n self._process_queue()\n self._set_timeout(0)\n return request\n\n # the next two methods are used for linux/epoll workaround:\n # we temporarily remove all curl fds from hub, so curl cannot\n # close a fd which is still inside epoll\n def _pop_from_hub(self):\n for fd in self._fds:\n self.hub.remove(fd)\n\n def _push_to_hub(self):\n for fd, events in self._fds.items():\n if events & READ:\n self.hub.add_reader(fd, self.on_readable, fd)\n if events & WRITE:\n self.hub.add_writer(fd, self.on_writable, fd)\n\n def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):\n if event == _pycurl.POLL_REMOVE:\n if fd in self._fds:\n self._fds.pop(fd, None)\n else:\n if event == _pycurl.POLL_IN:\n self._fds[fd] = READ\n elif event == _pycurl.POLL_OUT:\n self._fds[fd] = WRITE\n elif event == _pycurl.POLL_INOUT:\n self._fds[fd] = READ | WRITE\n\n def _set_timeout(self, msecs):\n self.hub.call_later(msecs, self._timeout_check)\n\n def _timeout_check(self, _pycurl=pycurl):\n self._pop_from_hub()\n try:\n while 1:\n try:\n ret, _ = self._multi.socket_all()\n except pycurl.error as exc:\n ret = exc.args[0]\n if ret != _pycurl.E_CALL_MULTI_PERFORM:\n break\n finally:\n self._push_to_hub()\n self._process_pending_requests()\n\n def on_readable(self, fd, _pycurl=pycurl):\n return self._on_event(fd, _pycurl.CSELECT_IN)\n\n def on_writable(self, fd, _pycurl=pycurl):\n return self._on_event(fd, _pycurl.CSELECT_OUT)\n\n def _on_event(self, fd, event, _pycurl=pycurl):\n self._pop_from_hub()\n try:\n while 1:\n try:\n ret, _ = self._socket_action(fd, event)\n except pycurl.error as exc:\n ret = exc.args[0]\n if ret != _pycurl.E_CALL_MULTI_PERFORM:\n break\n finally:\n self._push_to_hub()\n self._process_pending_requests()\n\n def _process_pending_requests(self):\n while 1:\n q, succeeded, failed = self._multi.info_read()\n for curl in succeeded:\n self._process(curl)\n for curl, errno, reason in failed:\n self._process(curl, errno, reason)\n if q == 0:\n break\n self._process_queue()\n\n def _process_queue(self):\n while 1:\n started = 0\n while self._free_list and self._pending:\n started += 1\n curl = self._free_list.pop()\n request = self._pending.popleft()\n headers = self.Headers()\n buf = BytesIO()\n curl.info = {\n 'headers': headers,\n 'buffer': buf,\n 'request': request,\n 'curl_start_time': time(),\n }\n self._setup_request(curl, request, buf, headers)\n self._multi.add_handle(curl)\n if not started:\n break\n\n def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):\n info, curl.info = curl.info, None\n self._multi.remove_handle(curl)\n self._free_list.append(curl)\n buffer = info['buffer']\n if errno:\n code = 599\n error = HttpError(code, reason)\n error.errno = errno\n effective_url = None\n buffer.close()\n buffer = None\n else:\n error = None\n code = curl.getinfo(_pycurl.HTTP_CODE)\n effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)\n buffer.seek(0)\n # try:\n request = info['request']\n request.on_ready(self.Response(\n request=request, code=code, headers=info['headers'],\n buffer=buffer, effective_url=effective_url, error=error,\n ))\n\n def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):\n setopt = curl.setopt\n setopt(_pycurl.URL, bytes_to_str(request.url))\n\n # see tornado curl client\n request.headers.setdefault('Expect', '')\n request.headers.setdefault('Pragma', '')\n\n setopt(\n _pycurl.HTTPHEADER,\n ['{}: {}'.format(*h) for h in request.headers.items()],\n )\n\n setopt(\n _pycurl.HEADERFUNCTION,\n 
partial(request.on_header or self.on_header, request.headers),\n )\n setopt(\n _pycurl.WRITEFUNCTION, request.on_stream or buffer.write,\n )\n setopt(\n _pycurl.FOLLOWLOCATION, request.follow_redirects,\n )\n setopt(\n _pycurl.USERAGENT,\n bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),\n )\n if request.network_interface:\n setopt(_pycurl.INTERFACE, request.network_interface)\n setopt(\n _pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',\n )\n if request.proxy_host:\n if not request.proxy_port:\n raise ValueError('Request with proxy_host but no proxy_port')\n setopt(_pycurl.PROXY, request.proxy_host)\n setopt(_pycurl.PROXYPORT, request.proxy_port)\n if request.proxy_username:\n setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(\n request.proxy_username, request.proxy_password or ''))\n else:\n setopt(_pycurl.PROXY, '')\n curl.unsetopt(_pycurl.PROXYUSERPWD)\n\n setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)\n setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)\n if request.ca_certs is not None:\n setopt(_pycurl.CAINFO, request.ca_certs)\n\n setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)\n\n for meth in METH_TO_CURL.values():\n setopt(meth, False)\n try:\n meth = METH_TO_CURL[request.method]\n except KeyError:\n curl.setopt(_pycurl.CUSTOMREQUEST, request.method)\n else:\n curl.unsetopt(_pycurl.CUSTOMREQUEST)\n setopt(meth, True)\n\n if request.method in ('POST', 'PUT'):\n body = request.body.encode('utf-8') if request.body else bytes()\n reqbuffer = BytesIO(body)\n setopt(_pycurl.READFUNCTION, reqbuffer.read)\n if request.method == 'POST':\n\n def ioctl(cmd):\n if cmd == _pycurl.IOCMD_RESTARTREAD:\n reqbuffer.seek(0)\n setopt(_pycurl.IOCTLFUNCTION, ioctl)\n setopt(_pycurl.POSTFIELDSIZE, len(body))\n else:\n setopt(_pycurl.INFILESIZE, len(body))\n elif request.method == 'GET':\n assert not request.body\n\n if request.auth_username is not None:\n auth_mode = {\n 'basic': _pycurl.HTTPAUTH_BASIC,\n 'digest': _pycurl.HTTPAUTH_DIGEST\n }[request.auth_mode or 'basic']\n setopt(_pycurl.HTTPAUTH, auth_mode)\n userpwd = '{}:{}'.format(\n request.auth_username, request.auth_password or '',\n )\n setopt(_pycurl.USERPWD, userpwd)\n else:\n curl.unsetopt(_pycurl.USERPWD)\n\n if request.client_cert is not None:\n setopt(_pycurl.SSLCERT, request.client_cert)\n if request.client_key is not None:\n setopt(_pycurl.SSLKEY, request.client_key)\n\n if request.on_prepare is not None:\n request.on_prepare(curl)\n", "path": "kombu/asynchronous/http/curl.py"}], "after_files": [{"content": "\"\"\"HTTP Client using pyCurl.\"\"\"\n\nfrom collections import deque\nfrom functools import partial\nfrom io import BytesIO\nfrom time import time\n\nfrom kombu.asynchronous.hub import READ, WRITE, get_event_loop\nfrom kombu.exceptions import HttpError\nfrom kombu.utils.encoding import bytes_to_str\n\nfrom .base import BaseClient\n\ntry:\n import pycurl\nexcept ImportError: # pragma: no cover\n pycurl = Curl = METH_TO_CURL = None\nelse:\n from pycurl import Curl\n\n METH_TO_CURL = {\n 'GET': pycurl.HTTPGET,\n 'POST': pycurl.POST,\n 'PUT': pycurl.UPLOAD,\n 'HEAD': pycurl.NOBODY,\n }\n\n__all__ = ('CurlClient',)\n\nDEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'\nEXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])\n\n\nclass CurlClient(BaseClient):\n \"\"\"Curl HTTP Client.\"\"\"\n\n Curl = Curl\n\n def __init__(self, hub=None, max_clients=10):\n if pycurl is None:\n raise ImportError('The curl client requires the pycurl library.')\n hub = hub or 
get_event_loop()\n super().__init__(hub)\n self.max_clients = max_clients\n\n self._multi = pycurl.CurlMulti()\n self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)\n self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)\n self._curls = [self.Curl() for i in range(max_clients)]\n self._free_list = self._curls[:]\n self._pending = deque()\n self._fds = {}\n\n self._socket_action = self._multi.socket_action\n self._timeout_check_tref = self.hub.call_repeatedly(\n 1.0, self._timeout_check,\n )\n\n # pycurl 7.29.0 workaround\n dummy_curl_handle = pycurl.Curl()\n self._multi.add_handle(dummy_curl_handle)\n self._multi.remove_handle(dummy_curl_handle)\n\n def close(self):\n self._timeout_check_tref.cancel()\n for _curl in self._curls:\n _curl.close()\n self._multi.close()\n\n def add_request(self, request):\n self._pending.append(request)\n self._process_queue()\n self._set_timeout(0)\n return request\n\n # the next two methods are used for linux/epoll workaround:\n # we temporarily remove all curl fds from hub, so curl cannot\n # close a fd which is still inside epoll\n def _pop_from_hub(self):\n for fd in self._fds:\n self.hub.remove(fd)\n\n def _push_to_hub(self):\n for fd, events in self._fds.items():\n if events & READ:\n self.hub.add_reader(fd, self.on_readable, fd)\n if events & WRITE:\n self.hub.add_writer(fd, self.on_writable, fd)\n\n def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):\n if event == _pycurl.POLL_REMOVE:\n if fd in self._fds:\n self._fds.pop(fd, None)\n else:\n if event == _pycurl.POLL_IN:\n self._fds[fd] = READ\n elif event == _pycurl.POLL_OUT:\n self._fds[fd] = WRITE\n elif event == _pycurl.POLL_INOUT:\n self._fds[fd] = READ | WRITE\n\n def _set_timeout(self, msecs):\n self.hub.call_later(msecs, self._timeout_check)\n\n def _timeout_check(self, _pycurl=pycurl):\n self._pop_from_hub()\n try:\n while 1:\n try:\n ret, _ = self._multi.socket_all()\n except pycurl.error as exc:\n ret = exc.args[0]\n if ret != _pycurl.E_CALL_MULTI_PERFORM:\n break\n finally:\n self._push_to_hub()\n self._process_pending_requests()\n\n def on_readable(self, fd, _pycurl=pycurl):\n return self._on_event(fd, _pycurl.CSELECT_IN)\n\n def on_writable(self, fd, _pycurl=pycurl):\n return self._on_event(fd, _pycurl.CSELECT_OUT)\n\n def _on_event(self, fd, event, _pycurl=pycurl):\n self._pop_from_hub()\n try:\n while 1:\n try:\n ret, _ = self._socket_action(fd, event)\n except pycurl.error as exc:\n ret = exc.args[0]\n if ret != _pycurl.E_CALL_MULTI_PERFORM:\n break\n finally:\n self._push_to_hub()\n self._process_pending_requests()\n\n def _process_pending_requests(self):\n while 1:\n q, succeeded, failed = self._multi.info_read()\n for curl in succeeded:\n self._process(curl)\n for curl, errno, reason in failed:\n self._process(curl, errno, reason)\n if q == 0:\n break\n self._process_queue()\n\n def _process_queue(self):\n while 1:\n started = 0\n while self._free_list and self._pending:\n started += 1\n curl = self._free_list.pop()\n request = self._pending.popleft()\n headers = self.Headers()\n buf = BytesIO()\n curl.info = {\n 'headers': headers,\n 'buffer': buf,\n 'request': request,\n 'curl_start_time': time(),\n }\n self._setup_request(curl, request, buf, headers)\n self._multi.add_handle(curl)\n if not started:\n break\n\n def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):\n info, curl.info = curl.info, None\n self._multi.remove_handle(curl)\n self._free_list.append(curl)\n buffer = info['buffer']\n if errno:\n code = 599\n error = 
HttpError(code, reason)\n error.errno = errno\n effective_url = None\n buffer.close()\n buffer = None\n else:\n error = None\n code = curl.getinfo(_pycurl.HTTP_CODE)\n effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)\n buffer.seek(0)\n # try:\n request = info['request']\n request.on_ready(self.Response(\n request=request, code=code, headers=info['headers'],\n buffer=buffer, effective_url=effective_url, error=error,\n ))\n\n def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):\n setopt = curl.setopt\n setopt(_pycurl.URL, bytes_to_str(request.url))\n\n # see tornado curl client\n request.headers.setdefault('Expect', '')\n request.headers.setdefault('Pragma', '')\n\n setopt(\n _pycurl.HTTPHEADER,\n ['{}: {}'.format(*h) for h in request.headers.items()],\n )\n\n setopt(\n _pycurl.HEADERFUNCTION,\n partial(request.on_header or self.on_header, request.headers),\n )\n setopt(\n _pycurl.WRITEFUNCTION, request.on_stream or buffer.write,\n )\n setopt(\n _pycurl.FOLLOWLOCATION, request.follow_redirects,\n )\n setopt(\n _pycurl.USERAGENT,\n bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),\n )\n if request.network_interface:\n setopt(_pycurl.INTERFACE, request.network_interface)\n setopt(\n _pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',\n )\n if request.proxy_host:\n if not request.proxy_port:\n raise ValueError('Request with proxy_host but no proxy_port')\n setopt(_pycurl.PROXY, request.proxy_host)\n setopt(_pycurl.PROXYPORT, request.proxy_port)\n if request.proxy_username:\n setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(\n request.proxy_username, request.proxy_password or ''))\n\n setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)\n setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)\n if request.ca_certs is not None:\n setopt(_pycurl.CAINFO, request.ca_certs)\n\n setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)\n\n for meth in METH_TO_CURL.values():\n setopt(meth, False)\n try:\n meth = METH_TO_CURL[request.method]\n except KeyError:\n curl.setopt(_pycurl.CUSTOMREQUEST, request.method)\n else:\n curl.unsetopt(_pycurl.CUSTOMREQUEST)\n setopt(meth, True)\n\n if request.method in ('POST', 'PUT'):\n body = request.body.encode('utf-8') if request.body else bytes()\n reqbuffer = BytesIO(body)\n setopt(_pycurl.READFUNCTION, reqbuffer.read)\n if request.method == 'POST':\n\n def ioctl(cmd):\n if cmd == _pycurl.IOCMD_RESTARTREAD:\n reqbuffer.seek(0)\n setopt(_pycurl.IOCTLFUNCTION, ioctl)\n setopt(_pycurl.POSTFIELDSIZE, len(body))\n else:\n setopt(_pycurl.INFILESIZE, len(body))\n elif request.method == 'GET':\n assert not request.body\n\n if request.auth_username is not None:\n auth_mode = {\n 'basic': _pycurl.HTTPAUTH_BASIC,\n 'digest': _pycurl.HTTPAUTH_DIGEST\n }[request.auth_mode or 'basic']\n setopt(_pycurl.HTTPAUTH, auth_mode)\n userpwd = '{}:{}'.format(\n request.auth_username, request.auth_password or '',\n )\n setopt(_pycurl.USERPWD, userpwd)\n else:\n curl.unsetopt(_pycurl.USERPWD)\n\n if request.client_cert is not None:\n setopt(_pycurl.SSLCERT, request.client_cert)\n if request.client_key is not None:\n setopt(_pycurl.SSLKEY, request.client_key)\n\n if request.on_prepare is not None:\n request.on_prepare(curl)\n", "path": "kombu/asynchronous/http/curl.py"}]} | 3,588 | 169 |
gh_patches_debug_42897 | rasdani/github-patches | git_diff | webkom__lego-1092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Meetings contains the report property at the list endpoint
--- END ISSUE ---
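Editor's note: the issue title is terse; in Django REST Framework terms it means the list endpoint serializes the heavy `report` field that should only appear on detail views. A common remedy, sketched below with hypothetical slim/full serializer names (only the `Meeting` model import path is taken from the repository), is to switch serializers per action:

```python
# Editorial sketch, not the repository's code: a slim serializer for `list`
# and a full serializer, including `report`, for detail routes.
from rest_framework import serializers, viewsets

from lego.apps.meetings.models import Meeting  # assumed app model import


class MeetingListSerializer(serializers.ModelSerializer):
    class Meta:
        model = Meeting
        fields = ('id', 'title', 'start_time', 'end_time')  # no `report`


class MeetingDetailSerializer(serializers.ModelSerializer):
    class Meta:
        model = Meeting
        fields = ('id', 'title', 'start_time', 'end_time', 'report')


class MeetingViewSet(viewsets.ModelViewSet):
    queryset = Meeting.objects.all()
    serializer_class = MeetingDetailSerializer

    def get_serializer_class(self):
        # Only detail-style actions need the full report body.
        if self.action == 'list':
            return MeetingListSerializer
        return super().get_serializer_class()
```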
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/meetings/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from lego.apps.content.fields import ContentSerializerField
4 from lego.apps.meetings import constants
5 from lego.apps.meetings.models import Meeting, MeetingInvitation
6 from lego.apps.users.fields import PublicUserField
7 from lego.apps.users.models import AbakusGroup, User
8 from lego.apps.users.serializers.users import PublicUserSerializer
9 from lego.utils.fields import PrimaryKeyRelatedFieldNoPKOpt
10 from lego.utils.serializers import BasisModelSerializer
11
12
13 class MeetingInvitationSerializer(BasisModelSerializer):
14 user = PublicUserSerializer()
15 status = serializers.ChoiceField(choices=(constants.ATTENDING, constants.NOT_ATTENDING))
16
17 class Meta:
18 model = MeetingInvitation
19 fields = ('user', 'status', 'meeting')
20
21 def create(self, validated_data):
22 meeting = Meeting.objects.get(id=self.context['view'].kwargs['meeting_pk'])
23 meeting_invitation = MeetingInvitation.objects.create(meeting=meeting, **validated_data)
24 return meeting_invitation
25
26
27 class MeetingInvitationUpdateSerializer(BasisModelSerializer):
28 class Meta:
29 model = MeetingInvitation
30 fields = ('status', )
31
32
33 class MeetingGroupInvite(serializers.Serializer):
34 group = PrimaryKeyRelatedFieldNoPKOpt(queryset=AbakusGroup.objects.all())
35
36
37 class MeetingUserInvite(serializers.Serializer):
38 user = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all())
39
40
41 class MeetingBulkInvite(serializers.Serializer):
42 users = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all(), many=True, required=False)
43 groups = PrimaryKeyRelatedFieldNoPKOpt(
44 queryset=AbakusGroup.objects.all(), many=True, required=False
45 )
46
47
48 class MeetingSerializer(BasisModelSerializer):
49 invitations = MeetingInvitationSerializer(many=True, read_only=True)
50 report = ContentSerializerField()
51 report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)
52 created_by = PublicUserField(read_only=True)
53
54 class Meta:
55 model = Meeting
56 fields = (
57 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report',
58 'report_author', 'invitations'
59 )
60
61 def create(self, validated_data):
62 meeting = Meeting.objects.create(**validated_data)
63 owner = validated_data['current_user']
64 meeting.invite_user(owner, owner)
65 return meeting
66
```
Path: `lego/apps/notifications/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from lego.apps.events.serializers.events import EventReadSerializer
4 from lego.apps.meetings.serializers import MeetingSerializer
5 from lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer
6 from lego.apps.users.serializers.users import PublicUserSerializer
7 from lego.utils.serializers import BasisModelSerializer
8
9 from .models import Announcement, NotificationSetting
10
11
12 class NotificationSettingSerializer(serializers.ModelSerializer):
13 class Meta:
14 model = NotificationSetting
15 fields = ('notification_type', 'enabled', 'channels')
16 read_only_fields = ('notification_type', )
17
18
19 class NotificationSettingCreateSerializer(serializers.ModelSerializer):
20 def create(self, validated_data):
21 validated_data['user'] = self.context['request'].user
22 return super().create(validated_data)
23
24 class Meta:
25 model = NotificationSetting
26 fields = ('notification_type', 'enabled', 'channels')
27
28
29 class AnnouncementListSerializer(BasisModelSerializer):
30
31 users = PublicUserSerializer(many=True, read_only=True)
32 groups = PublicAbakusGroupSerializer(many=True, read_only=True)
33 events = EventReadSerializer(many=True, read_only=True)
34 meetings = MeetingSerializer(many=True, read_only=True)
35
36 class Meta:
37 model = Announcement
38 fields = (
39 'id',
40 'message',
41 'sent',
42 'users',
43 'groups',
44 'events',
45 'meetings',
46 )
47 read_only_fields = ('sent', )
48
49
50 class AnnouncementDetailSerializer(BasisModelSerializer):
51 class Meta(AnnouncementListSerializer.Meta):
52 model = Announcement
53 fields = (
54 'id',
55 'message',
56 'sent',
57 'users',
58 'groups',
59 'events',
60 'meetings',
61 )
62 read_only_fields = ('sent', )
63
```
Path: `lego/apps/meetings/views.py`
Content:
```
1 from rest_framework import decorators, permissions, status, viewsets
2 from rest_framework.exceptions import ValidationError
3 from rest_framework.response import Response
4
5 from lego.apps.meetings.authentication import MeetingInvitationTokenAuthentication
6 from lego.apps.meetings.filters import MeetingFilterSet
7 from lego.apps.meetings.models import Meeting, MeetingInvitation
8 from lego.apps.meetings.serializers import (
9 MeetingBulkInvite, MeetingGroupInvite, MeetingInvitationSerializer,
10 MeetingInvitationUpdateSerializer, MeetingSerializer, MeetingUserInvite
11 )
12 from lego.apps.permissions.api.views import AllowedPermissionsMixin
13 from lego.apps.permissions.utils import get_permission_handler
14
15
16 class MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):
17
18 filter_class = MeetingFilterSet
19 serializer_class = MeetingSerializer
20
21 def get_queryset(self):
22 permission_handler = get_permission_handler(Meeting)
23 return permission_handler.filter_queryset(
24 self.request.user, Meeting.objects.prefetch_related('invitations', 'invitations__user')
25 )
26
27 def get_ordering(self):
28 ordering = self.request.query_params.get('ordering', None)
29 if ordering in ['start_time', '-start_time']:
30 return ordering
31 return 'start_time'
32
33 @decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)
34 def invite_user(self, request, *args, **kwargs):
35 meeting = self.get_object()
36 serializer = self.get_serializer(data=request.data)
37 serializer.is_valid(raise_exception=True)
38 user = serializer.validated_data['user']
39 meeting.invite_user(user, request.user)
40 return Response(data=serializer.data, status=status.HTTP_200_OK)
41
42 @decorators.detail_route(methods=['POST'], serializer_class=MeetingBulkInvite)
43 def bulk_invite(self, request, *args, **kwargs):
44 meeting = self.get_object()
45 serializer = self.get_serializer(data=request.data)
46 serializer.is_valid(raise_exception=True)
47 users = serializer.validated_data['users']
48 groups = serializer.validated_data['groups']
49 if not len(users) and not len(groups):
50 raise ValidationError({'error': 'No users or groups given'})
51
52 for user in users:
53 meeting.invite_user(user, request.user)
54 for group in groups:
55 meeting.invite_group(group, request.user)
56 return Response(data=serializer.data, status=status.HTTP_200_OK)
57
58 @decorators.detail_route(methods=['POST'], serializer_class=MeetingGroupInvite)
59 def invite_group(self, request, *args, **kwargs):
60 meeting = self.get_object()
61 serializer = self.get_serializer(data=request.data)
62 serializer.is_valid(raise_exception=True)
63 group = serializer.validated_data['group']
64 meeting.invite_group(group, request.user)
65 return Response(data=serializer.data, status=status.HTTP_200_OK)
66
67
68 class MeetingInvitationViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):
69
70 queryset = MeetingInvitation.objects.select_related('user')
71 lookup_field = 'user__id'
72
73 def get_serializer_class(self):
74 if self.action in ('update', 'partial_update'):
75 return MeetingInvitationUpdateSerializer
76 return MeetingInvitationSerializer
77
78 def get_queryset(self):
79 return MeetingInvitation.objects.filter(meeting=self.kwargs['meeting_pk'])
80
81
82 class MeetingInvitationTokenViewSet(viewsets.ViewSet):
83 """
84 Accept or reject invitation
85
86 Reject or accept invitation to meeting. It is genereated when
87 user is invited to a meeting, and sendt in the invitation email.
88
89 To accept: [accept/?token=yourtoken](accept/)
90
91 To reject: [reject/?token=yourtoken](reject/)
92 """
93 authentication_classes = (MeetingInvitationTokenAuthentication, )
94 permission_classes = (permissions.IsAuthenticated, )
95
96 @decorators.list_route(methods=['POST'])
97 def accept(self, request):
98 invitation = request.token_invitation
99 invitation.accept()
100 return Response(data=MeetingInvitationSerializer(invitation).data)
101
102 def list(self, request):
103 invitation = request.token_invitation
104 return Response(data=MeetingInvitationSerializer(invitation).data)
105
106 @decorators.list_route(methods=['POST'])
107 def reject(self, request):
108 invitation = request.token_invitation
109 invitation.reject()
110 return Response(data=MeetingInvitationSerializer(invitation).data)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lego/apps/meetings/serializers.py b/lego/apps/meetings/serializers.py
--- a/lego/apps/meetings/serializers.py
+++ b/lego/apps/meetings/serializers.py
@@ -45,7 +45,7 @@
)
-class MeetingSerializer(BasisModelSerializer):
+class MeetingDetailSerializer(BasisModelSerializer):
invitations = MeetingInvitationSerializer(many=True, read_only=True)
report = ContentSerializerField()
report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)
@@ -63,3 +63,14 @@
owner = validated_data['current_user']
meeting.invite_user(owner, owner)
return meeting
+
+
+class MeetingListSerializer(BasisModelSerializer):
+ report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)
+ created_by = PublicUserField(read_only=True)
+
+ class Meta:
+ model = Meeting
+ fields = (
+ 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report_author'
+ )
diff --git a/lego/apps/meetings/views.py b/lego/apps/meetings/views.py
--- a/lego/apps/meetings/views.py
+++ b/lego/apps/meetings/views.py
@@ -6,8 +6,8 @@
from lego.apps.meetings.filters import MeetingFilterSet
from lego.apps.meetings.models import Meeting, MeetingInvitation
from lego.apps.meetings.serializers import (
- MeetingBulkInvite, MeetingGroupInvite, MeetingInvitationSerializer,
- MeetingInvitationUpdateSerializer, MeetingSerializer, MeetingUserInvite
+ MeetingBulkInvite, MeetingDetailSerializer, MeetingGroupInvite, MeetingInvitationSerializer,
+ MeetingInvitationUpdateSerializer, MeetingListSerializer, MeetingUserInvite
)
from lego.apps.permissions.api.views import AllowedPermissionsMixin
from lego.apps.permissions.utils import get_permission_handler
@@ -16,7 +16,7 @@
class MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):
filter_class = MeetingFilterSet
- serializer_class = MeetingSerializer
+ serializer_class = MeetingDetailSerializer
def get_queryset(self):
permission_handler = get_permission_handler(Meeting)
@@ -30,6 +30,11 @@
return ordering
return 'start_time'
+ def get_serializer_class(self):
+ if self.action == 'list':
+ return MeetingListSerializer
+ return super().get_serializer_class()
+
@decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)
def invite_user(self, request, *args, **kwargs):
meeting = self.get_object()
diff --git a/lego/apps/notifications/serializers.py b/lego/apps/notifications/serializers.py
--- a/lego/apps/notifications/serializers.py
+++ b/lego/apps/notifications/serializers.py
@@ -1,7 +1,7 @@
from rest_framework import serializers
from lego.apps.events.serializers.events import EventReadSerializer
-from lego.apps.meetings.serializers import MeetingSerializer
+from lego.apps.meetings.serializers import MeetingDetailSerializer
from lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer
from lego.apps.users.serializers.users import PublicUserSerializer
from lego.utils.serializers import BasisModelSerializer
@@ -31,7 +31,7 @@
users = PublicUserSerializer(many=True, read_only=True)
groups = PublicAbakusGroupSerializer(many=True, read_only=True)
events = EventReadSerializer(many=True, read_only=True)
- meetings = MeetingSerializer(many=True, read_only=True)
+ meetings = MeetingDetailSerializer(many=True, read_only=True)
class Meta:
model = Announcement
| {"golden_diff": "diff --git a/lego/apps/meetings/serializers.py b/lego/apps/meetings/serializers.py\n--- a/lego/apps/meetings/serializers.py\n+++ b/lego/apps/meetings/serializers.py\n@@ -45,7 +45,7 @@\n )\n \n \n-class MeetingSerializer(BasisModelSerializer):\n+class MeetingDetailSerializer(BasisModelSerializer):\n invitations = MeetingInvitationSerializer(many=True, read_only=True)\n report = ContentSerializerField()\n report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)\n@@ -63,3 +63,14 @@\n owner = validated_data['current_user']\n meeting.invite_user(owner, owner)\n return meeting\n+\n+\n+class MeetingListSerializer(BasisModelSerializer):\n+ report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)\n+ created_by = PublicUserField(read_only=True)\n+\n+ class Meta:\n+ model = Meeting\n+ fields = (\n+ 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report_author'\n+ )\ndiff --git a/lego/apps/meetings/views.py b/lego/apps/meetings/views.py\n--- a/lego/apps/meetings/views.py\n+++ b/lego/apps/meetings/views.py\n@@ -6,8 +6,8 @@\n from lego.apps.meetings.filters import MeetingFilterSet\n from lego.apps.meetings.models import Meeting, MeetingInvitation\n from lego.apps.meetings.serializers import (\n- MeetingBulkInvite, MeetingGroupInvite, MeetingInvitationSerializer,\n- MeetingInvitationUpdateSerializer, MeetingSerializer, MeetingUserInvite\n+ MeetingBulkInvite, MeetingDetailSerializer, MeetingGroupInvite, MeetingInvitationSerializer,\n+ MeetingInvitationUpdateSerializer, MeetingListSerializer, MeetingUserInvite\n )\n from lego.apps.permissions.api.views import AllowedPermissionsMixin\n from lego.apps.permissions.utils import get_permission_handler\n@@ -16,7 +16,7 @@\n class MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):\n \n filter_class = MeetingFilterSet\n- serializer_class = MeetingSerializer\n+ serializer_class = MeetingDetailSerializer\n \n def get_queryset(self):\n permission_handler = get_permission_handler(Meeting)\n@@ -30,6 +30,11 @@\n return ordering\n return 'start_time'\n \n+ def get_serializer_class(self):\n+ if self.action == 'list':\n+ return MeetingListSerializer\n+ return super().get_serializer_class()\n+\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)\n def invite_user(self, request, *args, **kwargs):\n meeting = self.get_object()\ndiff --git a/lego/apps/notifications/serializers.py b/lego/apps/notifications/serializers.py\n--- a/lego/apps/notifications/serializers.py\n+++ b/lego/apps/notifications/serializers.py\n@@ -1,7 +1,7 @@\n from rest_framework import serializers\n \n from lego.apps.events.serializers.events import EventReadSerializer\n-from lego.apps.meetings.serializers import MeetingSerializer\n+from lego.apps.meetings.serializers import MeetingDetailSerializer\n from lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer\n from lego.apps.users.serializers.users import PublicUserSerializer\n from lego.utils.serializers import BasisModelSerializer\n@@ -31,7 +31,7 @@\n users = PublicUserSerializer(many=True, read_only=True)\n groups = PublicAbakusGroupSerializer(many=True, read_only=True)\n events = EventReadSerializer(many=True, read_only=True)\n- meetings = MeetingSerializer(many=True, read_only=True)\n+ meetings = MeetingDetailSerializer(many=True, read_only=True)\n \n class Meta:\n model = Announcement\n", "issue": "Meetings contains the report property at the list endpoint\n\n", "before_files": 
[{"content": "from rest_framework import serializers\n\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.meetings import constants\nfrom lego.apps.meetings.models import Meeting, MeetingInvitation\nfrom lego.apps.users.fields import PublicUserField\nfrom lego.apps.users.models import AbakusGroup, User\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.fields import PrimaryKeyRelatedFieldNoPKOpt\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass MeetingInvitationSerializer(BasisModelSerializer):\n user = PublicUserSerializer()\n status = serializers.ChoiceField(choices=(constants.ATTENDING, constants.NOT_ATTENDING))\n\n class Meta:\n model = MeetingInvitation\n fields = ('user', 'status', 'meeting')\n\n def create(self, validated_data):\n meeting = Meeting.objects.get(id=self.context['view'].kwargs['meeting_pk'])\n meeting_invitation = MeetingInvitation.objects.create(meeting=meeting, **validated_data)\n return meeting_invitation\n\n\nclass MeetingInvitationUpdateSerializer(BasisModelSerializer):\n class Meta:\n model = MeetingInvitation\n fields = ('status', )\n\n\nclass MeetingGroupInvite(serializers.Serializer):\n group = PrimaryKeyRelatedFieldNoPKOpt(queryset=AbakusGroup.objects.all())\n\n\nclass MeetingUserInvite(serializers.Serializer):\n user = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all())\n\n\nclass MeetingBulkInvite(serializers.Serializer):\n users = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all(), many=True, required=False)\n groups = PrimaryKeyRelatedFieldNoPKOpt(\n queryset=AbakusGroup.objects.all(), many=True, required=False\n )\n\n\nclass MeetingSerializer(BasisModelSerializer):\n invitations = MeetingInvitationSerializer(many=True, read_only=True)\n report = ContentSerializerField()\n report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)\n created_by = PublicUserField(read_only=True)\n\n class Meta:\n model = Meeting\n fields = (\n 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report',\n 'report_author', 'invitations'\n )\n\n def create(self, validated_data):\n meeting = Meeting.objects.create(**validated_data)\n owner = validated_data['current_user']\n meeting.invite_user(owner, owner)\n return meeting\n", "path": "lego/apps/meetings/serializers.py"}, {"content": "from rest_framework import serializers\n\nfrom lego.apps.events.serializers.events import EventReadSerializer\nfrom lego.apps.meetings.serializers import MeetingSerializer\nfrom lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\nfrom .models import Announcement, NotificationSetting\n\n\nclass NotificationSettingSerializer(serializers.ModelSerializer):\n class Meta:\n model = NotificationSetting\n fields = ('notification_type', 'enabled', 'channels')\n read_only_fields = ('notification_type', )\n\n\nclass NotificationSettingCreateSerializer(serializers.ModelSerializer):\n def create(self, validated_data):\n validated_data['user'] = self.context['request'].user\n return super().create(validated_data)\n\n class Meta:\n model = NotificationSetting\n fields = ('notification_type', 'enabled', 'channels')\n\n\nclass AnnouncementListSerializer(BasisModelSerializer):\n\n users = PublicUserSerializer(many=True, read_only=True)\n groups = PublicAbakusGroupSerializer(many=True, read_only=True)\n events = 
EventReadSerializer(many=True, read_only=True)\n meetings = MeetingSerializer(many=True, read_only=True)\n\n class Meta:\n model = Announcement\n fields = (\n 'id',\n 'message',\n 'sent',\n 'users',\n 'groups',\n 'events',\n 'meetings',\n )\n read_only_fields = ('sent', )\n\n\nclass AnnouncementDetailSerializer(BasisModelSerializer):\n class Meta(AnnouncementListSerializer.Meta):\n model = Announcement\n fields = (\n 'id',\n 'message',\n 'sent',\n 'users',\n 'groups',\n 'events',\n 'meetings',\n )\n read_only_fields = ('sent', )\n", "path": "lego/apps/notifications/serializers.py"}, {"content": "from rest_framework import decorators, permissions, status, viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom lego.apps.meetings.authentication import MeetingInvitationTokenAuthentication\nfrom lego.apps.meetings.filters import MeetingFilterSet\nfrom lego.apps.meetings.models import Meeting, MeetingInvitation\nfrom lego.apps.meetings.serializers import (\n MeetingBulkInvite, MeetingGroupInvite, MeetingInvitationSerializer,\n MeetingInvitationUpdateSerializer, MeetingSerializer, MeetingUserInvite\n)\nfrom lego.apps.permissions.api.views import AllowedPermissionsMixin\nfrom lego.apps.permissions.utils import get_permission_handler\n\n\nclass MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):\n\n filter_class = MeetingFilterSet\n serializer_class = MeetingSerializer\n\n def get_queryset(self):\n permission_handler = get_permission_handler(Meeting)\n return permission_handler.filter_queryset(\n self.request.user, Meeting.objects.prefetch_related('invitations', 'invitations__user')\n )\n\n def get_ordering(self):\n ordering = self.request.query_params.get('ordering', None)\n if ordering in ['start_time', '-start_time']:\n return ordering\n return 'start_time'\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)\n def invite_user(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n meeting.invite_user(user, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingBulkInvite)\n def bulk_invite(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n users = serializer.validated_data['users']\n groups = serializer.validated_data['groups']\n if not len(users) and not len(groups):\n raise ValidationError({'error': 'No users or groups given'})\n\n for user in users:\n meeting.invite_user(user, request.user)\n for group in groups:\n meeting.invite_group(group, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingGroupInvite)\n def invite_group(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n group = serializer.validated_data['group']\n meeting.invite_group(group, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass MeetingInvitationViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):\n\n queryset = MeetingInvitation.objects.select_related('user')\n lookup_field = 'user__id'\n\n def 
get_serializer_class(self):\n if self.action in ('update', 'partial_update'):\n return MeetingInvitationUpdateSerializer\n return MeetingInvitationSerializer\n\n def get_queryset(self):\n return MeetingInvitation.objects.filter(meeting=self.kwargs['meeting_pk'])\n\n\nclass MeetingInvitationTokenViewSet(viewsets.ViewSet):\n \"\"\"\n Accept or reject invitation\n\n Reject or accept invitation to meeting. It is genereated when\n user is invited to a meeting, and sendt in the invitation email.\n\n To accept: [accept/?token=yourtoken](accept/)\n\n To reject: [reject/?token=yourtoken](reject/)\n \"\"\"\n authentication_classes = (MeetingInvitationTokenAuthentication, )\n permission_classes = (permissions.IsAuthenticated, )\n\n @decorators.list_route(methods=['POST'])\n def accept(self, request):\n invitation = request.token_invitation\n invitation.accept()\n return Response(data=MeetingInvitationSerializer(invitation).data)\n\n def list(self, request):\n invitation = request.token_invitation\n return Response(data=MeetingInvitationSerializer(invitation).data)\n\n @decorators.list_route(methods=['POST'])\n def reject(self, request):\n invitation = request.token_invitation\n invitation.reject()\n return Response(data=MeetingInvitationSerializer(invitation).data)\n", "path": "lego/apps/meetings/views.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom lego.apps.content.fields import ContentSerializerField\nfrom lego.apps.meetings import constants\nfrom lego.apps.meetings.models import Meeting, MeetingInvitation\nfrom lego.apps.users.fields import PublicUserField\nfrom lego.apps.users.models import AbakusGroup, User\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.fields import PrimaryKeyRelatedFieldNoPKOpt\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass MeetingInvitationSerializer(BasisModelSerializer):\n user = PublicUserSerializer()\n status = serializers.ChoiceField(choices=(constants.ATTENDING, constants.NOT_ATTENDING))\n\n class Meta:\n model = MeetingInvitation\n fields = ('user', 'status', 'meeting')\n\n def create(self, validated_data):\n meeting = Meeting.objects.get(id=self.context['view'].kwargs['meeting_pk'])\n meeting_invitation = MeetingInvitation.objects.create(meeting=meeting, **validated_data)\n return meeting_invitation\n\n\nclass MeetingInvitationUpdateSerializer(BasisModelSerializer):\n class Meta:\n model = MeetingInvitation\n fields = ('status', )\n\n\nclass MeetingGroupInvite(serializers.Serializer):\n group = PrimaryKeyRelatedFieldNoPKOpt(queryset=AbakusGroup.objects.all())\n\n\nclass MeetingUserInvite(serializers.Serializer):\n user = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all())\n\n\nclass MeetingBulkInvite(serializers.Serializer):\n users = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all(), many=True, required=False)\n groups = PrimaryKeyRelatedFieldNoPKOpt(\n queryset=AbakusGroup.objects.all(), many=True, required=False\n )\n\n\nclass MeetingDetailSerializer(BasisModelSerializer):\n invitations = MeetingInvitationSerializer(many=True, read_only=True)\n report = ContentSerializerField()\n report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)\n created_by = PublicUserField(read_only=True)\n\n class Meta:\n model = Meeting\n fields = (\n 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report',\n 'report_author', 'invitations'\n )\n\n def create(self, validated_data):\n meeting = 
Meeting.objects.create(**validated_data)\n owner = validated_data['current_user']\n meeting.invite_user(owner, owner)\n return meeting\n\n\nclass MeetingListSerializer(BasisModelSerializer):\n report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)\n created_by = PublicUserField(read_only=True)\n\n class Meta:\n model = Meeting\n fields = (\n 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report_author'\n )\n", "path": "lego/apps/meetings/serializers.py"}, {"content": "from rest_framework import serializers\n\nfrom lego.apps.events.serializers.events import EventReadSerializer\nfrom lego.apps.meetings.serializers import MeetingDetailSerializer\nfrom lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer\nfrom lego.apps.users.serializers.users import PublicUserSerializer\nfrom lego.utils.serializers import BasisModelSerializer\n\nfrom .models import Announcement, NotificationSetting\n\n\nclass NotificationSettingSerializer(serializers.ModelSerializer):\n class Meta:\n model = NotificationSetting\n fields = ('notification_type', 'enabled', 'channels')\n read_only_fields = ('notification_type', )\n\n\nclass NotificationSettingCreateSerializer(serializers.ModelSerializer):\n def create(self, validated_data):\n validated_data['user'] = self.context['request'].user\n return super().create(validated_data)\n\n class Meta:\n model = NotificationSetting\n fields = ('notification_type', 'enabled', 'channels')\n\n\nclass AnnouncementListSerializer(BasisModelSerializer):\n\n users = PublicUserSerializer(many=True, read_only=True)\n groups = PublicAbakusGroupSerializer(many=True, read_only=True)\n events = EventReadSerializer(many=True, read_only=True)\n meetings = MeetingDetailSerializer(many=True, read_only=True)\n\n class Meta:\n model = Announcement\n fields = (\n 'id',\n 'message',\n 'sent',\n 'users',\n 'groups',\n 'events',\n 'meetings',\n )\n read_only_fields = ('sent', )\n\n\nclass AnnouncementDetailSerializer(BasisModelSerializer):\n class Meta(AnnouncementListSerializer.Meta):\n model = Announcement\n fields = (\n 'id',\n 'message',\n 'sent',\n 'users',\n 'groups',\n 'events',\n 'meetings',\n )\n read_only_fields = ('sent', )\n", "path": "lego/apps/notifications/serializers.py"}, {"content": "from rest_framework import decorators, permissions, status, viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom lego.apps.meetings.authentication import MeetingInvitationTokenAuthentication\nfrom lego.apps.meetings.filters import MeetingFilterSet\nfrom lego.apps.meetings.models import Meeting, MeetingInvitation\nfrom lego.apps.meetings.serializers import (\n MeetingBulkInvite, MeetingDetailSerializer, MeetingGroupInvite, MeetingInvitationSerializer,\n MeetingInvitationUpdateSerializer, MeetingListSerializer, MeetingUserInvite\n)\nfrom lego.apps.permissions.api.views import AllowedPermissionsMixin\nfrom lego.apps.permissions.utils import get_permission_handler\n\n\nclass MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):\n\n filter_class = MeetingFilterSet\n serializer_class = MeetingDetailSerializer\n\n def get_queryset(self):\n permission_handler = get_permission_handler(Meeting)\n return permission_handler.filter_queryset(\n self.request.user, Meeting.objects.prefetch_related('invitations', 'invitations__user')\n )\n\n def get_ordering(self):\n ordering = self.request.query_params.get('ordering', None)\n if ordering in ['start_time', 
'-start_time']:\n return ordering\n return 'start_time'\n\n def get_serializer_class(self):\n if self.action == 'list':\n return MeetingListSerializer\n return super().get_serializer_class()\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)\n def invite_user(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n meeting.invite_user(user, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingBulkInvite)\n def bulk_invite(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n users = serializer.validated_data['users']\n groups = serializer.validated_data['groups']\n if not len(users) and not len(groups):\n raise ValidationError({'error': 'No users or groups given'})\n\n for user in users:\n meeting.invite_user(user, request.user)\n for group in groups:\n meeting.invite_group(group, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n @decorators.detail_route(methods=['POST'], serializer_class=MeetingGroupInvite)\n def invite_group(self, request, *args, **kwargs):\n meeting = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n group = serializer.validated_data['group']\n meeting.invite_group(group, request.user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass MeetingInvitationViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):\n\n queryset = MeetingInvitation.objects.select_related('user')\n lookup_field = 'user__id'\n\n def get_serializer_class(self):\n if self.action in ('update', 'partial_update'):\n return MeetingInvitationUpdateSerializer\n return MeetingInvitationSerializer\n\n def get_queryset(self):\n return MeetingInvitation.objects.filter(meeting=self.kwargs['meeting_pk'])\n\n\nclass MeetingInvitationTokenViewSet(viewsets.ViewSet):\n \"\"\"\n Accept or reject invitation\n\n Reject or accept invitation to meeting. It is genereated when\n user is invited to a meeting, and sendt in the invitation email.\n\n To accept: [accept/?token=yourtoken](accept/)\n\n To reject: [reject/?token=yourtoken](reject/)\n \"\"\"\n authentication_classes = (MeetingInvitationTokenAuthentication, )\n permission_classes = (permissions.IsAuthenticated, )\n\n @decorators.list_route(methods=['POST'])\n def accept(self, request):\n invitation = request.token_invitation\n invitation.accept()\n return Response(data=MeetingInvitationSerializer(invitation).data)\n\n def list(self, request):\n invitation = request.token_invitation\n return Response(data=MeetingInvitationSerializer(invitation).data)\n\n @decorators.list_route(methods=['POST'])\n def reject(self, request):\n invitation = request.token_invitation\n invitation.reject()\n return Response(data=MeetingInvitationSerializer(invitation).data)\n", "path": "lego/apps/meetings/views.py"}]} | 2,601 | 855 |
gh_patches_debug_11217 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1879 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: WasteNet Southland not working after 1.46.0
### I Have A Problem With:
A specific source
### What's Your Problem
The WasteNet Southland website and URL changed about a month ago. The issue created by this change was supposed to be fixed in 1.46.0, but unfortunately it is still not working.
Tested with my address and even with the example data; all sensors are returned as unknown.
### Source (if relevant)
wastenet_org_nz
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
_No response_
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py`
Content:
```
1 import re
2 from datetime import datetime
3 from html.parser import HTMLParser
4
5 import requests
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 TITLE = "Gore, Invercargill & Southland"
9 DESCRIPTION = "Source for Wastenet.org.nz."
10 URL = "http://www.wastenet.org.nz"
11 TEST_CASES = {
12 "166 Lewis Street": {"address": "166 Lewis Street"},
13 "Old Format: 199 Crawford Street": {"address": "199 Crawford Street INVERCARGILL"},
14 "Old Format: 156 Tay Street": {"address": "156 Tay Street INVERCARGILL"},
15 "entry_id glass only": {"entry_id": "23571"},
16 # "31 Conyers Street": {"address": "31 Conyers Street INVERCARGILL"}, # Thursday
17 # "67 Chesney Street": {"address": "67 Chesney Street INVERCARGILL"}, # Friday
18 }
19
20 ICON_MAP = {
21 "Glass": "mdi:glass-mug-variant",
22 "Rubbish": "mdi:delete-empty",
23 "Recycle": "mdi:recycle",
24 }
25
26
27 class WasteSearchResultsParser(HTMLParser):
28 def __init__(self):
29 super().__init__()
30 self._entries = []
31 self._wasteType = None
32 self._withinCollectionDay = False
33 self._withinType = False
34
35 @property
36 def entries(self):
37 return self._entries
38
39 def handle_starttag(self, tag, attrs):
40 if tag == "span":
41 d = dict(attrs)
42 if d.get("class", "").startswith("badge"):
43 self._withinType = True
44
45 def handle_data(self, data):
46 if self._withinType:
47 self._withinType = False
48 self._wasteType = data
49 elif data.startswith("Next Service Date:"):
50 self._withinCollectionDay = True
51 elif self._withinCollectionDay:
52 date = datetime.strptime(data, "%y/%m/%d").date()
53 if self._wasteType is not None:
54 self._entries.append(Collection(date, self._wasteType))
55 self._withinCollectionDay = False
56
57
58 HEADER = {"User-Agent": "Mozilla/5.0"}
59
60 SITE_URL = "https://www.wastenet.org.nz/bin-day/"
61 ADDRESS_URL = "https://www.wastenet.org.nz/wp-admin/admin-ajax.php"
62
63
64 class Source:
65 def __init__(self, address: str | None = None, entry_id=None):
66 if not address and not entry_id:
67 raise ValueError("Address or entry_id must be provided")
68
69 self._address = address.replace(" INVERCARGILL", "") if address else None
70 self._entry_id = entry_id
71
72 def get_entry_id(self, s):
73 r = s.get(SITE_URL)
74 r.raise_for_status()
75 # regex find security: 'KEY'
76 match = re.search(r"security: '(\w+)'", r.text)
77 if not match:
78 raise ValueError("Security key not found")
79 security_key = match.group(1)
80
81 # get token
82 params = {
83 "action": "we_data_autocomplete",
84 "term": self._address,
85 "security": security_key,
86 }
87
88 r = s.get(
89 ADDRESS_URL,
90 params=params,
91 )
92 r.raise_for_status()
93
94 return r.json()["data"][0]["url"].split("=")[1]
95
96 def fetch(self):
97 s = requests.Session()
98 s.headers.update(HEADER)
99
100 if self._entry_id is None:
101 self._entry_id = self.get_entry_id(s)
102
103 r = s.get(SITE_URL, params={"entry_id": self._entry_id})
104 r.raise_for_status()
105 p = WasteSearchResultsParser()
106 p.feed(r.text)
107 return p.entries
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py
@@ -49,7 +49,7 @@
elif data.startswith("Next Service Date:"):
self._withinCollectionDay = True
elif self._withinCollectionDay:
- date = datetime.strptime(data, "%y/%m/%d").date()
+ date = datetime.strptime(data, "%d/%m/%y").date()
if self._wasteType is not None:
self._entries.append(Collection(date, self._wasteType))
self._withinCollectionDay = False
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py\n@@ -49,7 +49,7 @@\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n- date = datetime.strptime(data, \"%y/%m/%d\").date()\n+ date = datetime.strptime(data, \"%d/%m/%y\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n", "issue": "[Bug]: WasteNet Southland not working after 1.46.0\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe WasteNet Southland website and url has changed about a month ago. The issue created by this change was supposed to be fixed in 1.46.0, but unfortunately it is still not working.\r\nTested with my address and even with the example data, returning all sensors as unknown.\n\n### Source (if relevant)\n\nwastenet_org_nz\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nfrom datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Gore, Invercargill & Southland\"\nDESCRIPTION = \"Source for Wastenet.org.nz.\"\nURL = \"http://www.wastenet.org.nz\"\nTEST_CASES = {\n \"166 Lewis Street\": {\"address\": \"166 Lewis Street\"},\n \"Old Format: 199 Crawford Street\": {\"address\": \"199 Crawford Street INVERCARGILL\"},\n \"Old Format: 156 Tay Street\": {\"address\": \"156 Tay Street INVERCARGILL\"},\n \"entry_id glass only\": {\"entry_id\": \"23571\"},\n # \"31 Conyers Street\": {\"address\": \"31 Conyers Street INVERCARGILL\"}, # Thursday\n # \"67 Chesney Street\": {\"address\": \"67 Chesney Street INVERCARGILL\"}, # Friday\n}\n\nICON_MAP = {\n \"Glass\": \"mdi:glass-mug-variant\",\n \"Rubbish\": \"mdi:delete-empty\",\n \"Recycle\": \"mdi:recycle\",\n}\n\n\nclass WasteSearchResultsParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._entries = []\n self._wasteType = None\n self._withinCollectionDay = False\n self._withinType = False\n\n @property\n def entries(self):\n return self._entries\n\n def 
handle_starttag(self, tag, attrs):\n if tag == \"span\":\n d = dict(attrs)\n if d.get(\"class\", \"\").startswith(\"badge\"):\n self._withinType = True\n\n def handle_data(self, data):\n if self._withinType:\n self._withinType = False\n self._wasteType = data\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n date = datetime.strptime(data, \"%y/%m/%d\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n\n\nHEADER = {\"User-Agent\": \"Mozilla/5.0\"}\n\nSITE_URL = \"https://www.wastenet.org.nz/bin-day/\"\nADDRESS_URL = \"https://www.wastenet.org.nz/wp-admin/admin-ajax.php\"\n\n\nclass Source:\n def __init__(self, address: str | None = None, entry_id=None):\n if not address and not entry_id:\n raise ValueError(\"Address or entry_id must be provided\")\n\n self._address = address.replace(\" INVERCARGILL\", \"\") if address else None\n self._entry_id = entry_id\n\n def get_entry_id(self, s):\n r = s.get(SITE_URL)\n r.raise_for_status()\n # regex find security: 'KEY'\n match = re.search(r\"security: '(\\w+)'\", r.text)\n if not match:\n raise ValueError(\"Security key not found\")\n security_key = match.group(1)\n\n # get token\n params = {\n \"action\": \"we_data_autocomplete\",\n \"term\": self._address,\n \"security\": security_key,\n }\n\n r = s.get(\n ADDRESS_URL,\n params=params,\n )\n r.raise_for_status()\n\n return r.json()[\"data\"][0][\"url\"].split(\"=\")[1]\n\n def fetch(self):\n s = requests.Session()\n s.headers.update(HEADER)\n\n if self._entry_id is None:\n self._entry_id = self.get_entry_id(s)\n\n r = s.get(SITE_URL, params={\"entry_id\": self._entry_id})\n r.raise_for_status()\n p = WasteSearchResultsParser()\n p.feed(r.text)\n return p.entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py"}], "after_files": [{"content": "import re\nfrom datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Gore, Invercargill & Southland\"\nDESCRIPTION = \"Source for Wastenet.org.nz.\"\nURL = \"http://www.wastenet.org.nz\"\nTEST_CASES = {\n \"166 Lewis Street\": {\"address\": \"166 Lewis Street\"},\n \"Old Format: 199 Crawford Street\": {\"address\": \"199 Crawford Street INVERCARGILL\"},\n \"Old Format: 156 Tay Street\": {\"address\": \"156 Tay Street INVERCARGILL\"},\n \"entry_id glass only\": {\"entry_id\": \"23571\"},\n # \"31 Conyers Street\": {\"address\": \"31 Conyers Street INVERCARGILL\"}, # Thursday\n # \"67 Chesney Street\": {\"address\": \"67 Chesney Street INVERCARGILL\"}, # Friday\n}\n\nICON_MAP = {\n \"Glass\": \"mdi:glass-mug-variant\",\n \"Rubbish\": \"mdi:delete-empty\",\n \"Recycle\": \"mdi:recycle\",\n}\n\n\nclass WasteSearchResultsParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._entries = []\n self._wasteType = None\n self._withinCollectionDay = False\n self._withinType = False\n\n @property\n def entries(self):\n return self._entries\n\n def handle_starttag(self, tag, attrs):\n if tag == \"span\":\n d = dict(attrs)\n if d.get(\"class\", \"\").startswith(\"badge\"):\n self._withinType = True\n\n def handle_data(self, data):\n if self._withinType:\n self._withinType = False\n self._wasteType = data\n elif data.startswith(\"Next Service Date:\"):\n self._withinCollectionDay = True\n elif self._withinCollectionDay:\n date = 
datetime.strptime(data, \"%d/%m/%y\").date()\n if self._wasteType is not None:\n self._entries.append(Collection(date, self._wasteType))\n self._withinCollectionDay = False\n\n\nHEADER = {\"User-Agent\": \"Mozilla/5.0\"}\n\nSITE_URL = \"https://www.wastenet.org.nz/bin-day/\"\nADDRESS_URL = \"https://www.wastenet.org.nz/wp-admin/admin-ajax.php\"\n\n\nclass Source:\n def __init__(self, address: str | None = None, entry_id=None):\n if not address and not entry_id:\n raise ValueError(\"Address or entry_id must be provided\")\n\n self._address = address.replace(\" INVERCARGILL\", \"\") if address else None\n self._entry_id = entry_id\n\n def get_entry_id(self, s):\n r = s.get(SITE_URL)\n r.raise_for_status()\n # regex find security: 'KEY'\n match = re.search(r\"security: '(\\w+)'\", r.text)\n if not match:\n raise ValueError(\"Security key not found\")\n security_key = match.group(1)\n\n # get token\n params = {\n \"action\": \"we_data_autocomplete\",\n \"term\": self._address,\n \"security\": security_key,\n }\n\n r = s.get(\n ADDRESS_URL,\n params=params,\n )\n r.raise_for_status()\n\n return r.json()[\"data\"][0][\"url\"].split(\"=\")[1]\n\n def fetch(self):\n s = requests.Session()\n s.headers.update(HEADER)\n\n if self._entry_id is None:\n self._entry_id = self.get_entry_id(s)\n\n r = s.get(SITE_URL, params={\"entry_id\": self._entry_id})\n r.raise_for_status()\n p = WasteSearchResultsParser()\n p.feed(r.text)\n return p.entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/wastenet_org_nz.py"}]} | 1,678 | 191 |
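The golden diff above boils down to one `strptime` format change: the scraped "Next Service Date" text is parsed as day/month/two-digit-year instead of year/month/day. A minimal sketch of why that matters, assuming the site renders the date as something like `21/03/24` (the sample string is an assumption, not taken from the source):

```python
from datetime import datetime

# Hypothetical "Next Service Date" text as the site appears to render it.
sample = "21/03/24"

# The old format string read the first field as a two-digit year, so the parse
# succeeded but produced a date in the past, consistent with the "unknown"
# sensors reported in the issue (no upcoming collection is ever found).
print(datetime.strptime(sample, "%y/%m/%d").date())  # 2021-03-24

# The patched format string from the golden diff yields the intended date.
print(datetime.strptime(sample, "%d/%m/%y").date())  # 2024-03-21
```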
gh_patches_debug_44235 | rasdani/github-patches | git_diff | localstack__localstack-7667 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature request: PROVIDER_OVERRIDE_LAMBDA=asf, Lambda container prefix
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Feature description
We're trying to switch from the old lambda implementation to asf. In the old lambda implementation, when using `LAMBDA_EXECUTOR=docker-reuse`, the lambda container name received a prefix equal to that of the LocalStack container in Docker. (This was not the case for all `LAMBDA_EXECUTOR` flavours.)
This way, we were able to track the lambda containers that were started for a given LocalStack instance, print their logs, and kill them when necessary.
Request: Would it be possible to prefix the lambda containers with the name of the LocalStack container itself, if present?
### 🧑💻 Implementation
_No response_
### Anything else?
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/awslambda/invocation/docker_runtime_executor.py`
Content:
```
1 import dataclasses
2 import json
3 import logging
4 import shutil
5 import time
6 from pathlib import Path
7 from typing import Callable, Dict, Literal, Optional
8
9 from localstack import config
10 from localstack.aws.api.lambda_ import PackageType, Runtime
11 from localstack.services.awslambda import hooks as lambda_hooks
12 from localstack.services.awslambda.invocation.executor_endpoint import (
13 INVOCATION_PORT,
14 ExecutorEndpoint,
15 ServiceEndpoint,
16 )
17 from localstack.services.awslambda.invocation.lambda_models import IMAGE_MAPPING, FunctionVersion
18 from localstack.services.awslambda.invocation.runtime_executor import (
19 LambdaRuntimeException,
20 RuntimeExecutor,
21 )
22 from localstack.services.awslambda.lambda_utils import (
23 get_container_network_for_lambda,
24 get_main_endpoint_from_container,
25 )
26 from localstack.services.awslambda.packages import awslambda_runtime_package
27 from localstack.utils.container_utils.container_client import (
28 ContainerConfiguration,
29 PortMappings,
30 VolumeBind,
31 VolumeMappings,
32 )
33 from localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT
34 from localstack.utils.net import get_free_tcp_port
35 from localstack.utils.strings import truncate
36
37 LOG = logging.getLogger(__name__)
38
39 IMAGE_PREFIX = "public.ecr.aws/lambda/"
40 # IMAGE_PREFIX = "amazon/aws-lambda-"
41
42 RAPID_ENTRYPOINT = "/var/rapid/init"
43
44 InitializationType = Literal["on-demand", "provisioned-concurrency"]
45
46 LAMBDA_DOCKERFILE = """FROM {base_img}
47 COPY aws-lambda-rie {rapid_entrypoint}
48 COPY code/ /var/task
49 """
50
51 PULLED_IMAGES: set[str] = set()
52
53 HOT_RELOADING_ENV_VARIABLE = "LOCALSTACK_HOT_RELOADING_PATHS"
54
55
56 def get_image_name_for_function(function_version: FunctionVersion) -> str:
57 return f"localstack/lambda-{function_version.id.qualified_arn().replace(':', '_').replace('$', '_').lower()}"
58
59
60 def get_default_image_for_runtime(runtime: str) -> str:
61 postfix = IMAGE_MAPPING.get(runtime)
62 if not postfix:
63 raise ValueError(f"Unsupported runtime {runtime}!")
64 return f"{IMAGE_PREFIX}{postfix}"
65
66
67 class RuntimeImageResolver:
68 """
69 Resolves Lambda runtimes to corresponding docker images
70 The default behavior resolves based on a prefix (including the repository) and a suffix (per runtime).
71
72 This can be customized via the LAMBDA_RUNTIME_IMAGE_MAPPING config in 2 distinct ways:
73
74 Option A: use a pattern string for the config variable that includes the "<runtime>" string
75 e.g. "myrepo/lambda:<runtime>-custom" would resolve the runtime "python3.9" to "myrepo/lambda:python3.9-custom"
76
77 Option B: use a JSON dict string for the config variable, mapping the runtime to the full image name & tag
78 e.g. {"python3.9": "myrepo/lambda:python3.9-custom", "python3.8": "myotherrepo/pylambda:3.8"}
79
80 Note that with Option B this will only apply to the runtimes included in the dict.
81 All other (non-included) runtimes will fall back to the default behavior.
82 """
83
84 _mapping: dict[Runtime, str]
85 _default_resolve_fn: Callable[[Runtime], str]
86
87 def __init__(
88 self, default_resolve_fn: Callable[[Runtime], str] = get_default_image_for_runtime
89 ):
90 self._mapping = dict()
91 self._default_resolve_fn = default_resolve_fn
92
93 def _resolve(self, runtime: Runtime, custom_image_mapping: str = "") -> str:
94 if runtime not in IMAGE_MAPPING:
95 raise ValueError(f"Unsupported runtime {runtime}")
96
97 if not custom_image_mapping:
98 return self._default_resolve_fn(runtime)
99
100 # Option A (pattern string that includes <runtime> to replace)
101 if "<runtime>" in custom_image_mapping:
102 return custom_image_mapping.replace("<runtime>", runtime)
103
104 # Option B (json dict mapping with fallback)
105 try:
106 mapping: dict = json.loads(custom_image_mapping)
107 # at this point we're loading the whole dict to avoid parsing multiple times
108 for k, v in mapping.items():
109 if k not in IMAGE_MAPPING:
110 raise ValueError(
111 f"Unsupported runtime ({runtime}) provided in LAMBDA_RUNTIME_IMAGE_MAPPING"
112 )
113 self._mapping[k] = v
114
115 if runtime in self._mapping:
116 return self._mapping[runtime]
117
118 # fall back to default behavior if the runtime was not present in the custom config
119 return self._default_resolve_fn(runtime)
120
121 except Exception:
122 LOG.error(
123 f"Failed to load config from LAMBDA_RUNTIME_IMAGE_MAPPING={custom_image_mapping}"
124 )
125 raise # TODO: validate config at start and prevent startup
126
127 def get_image_for_runtime(self, runtime: Runtime) -> str:
128 if runtime not in self._mapping:
129 resolved_image = self._resolve(runtime, config.LAMBDA_RUNTIME_IMAGE_MAPPING)
130 self._mapping[runtime] = resolved_image
131
132 return self._mapping[runtime]
133
134
135 resolver = RuntimeImageResolver()
136
137
138 def get_runtime_client_path() -> Path:
139 installer = awslambda_runtime_package.get_installer()
140 installer.install()
141 return Path(installer.get_installed_dir())
142
143
144 def prepare_image(target_path: Path, function_version: FunctionVersion) -> None:
145 if not function_version.config.runtime:
146 raise NotImplementedError("Custom images are currently not supported")
147 src_init = get_runtime_client_path()
148 # copy init file
149 target_init = awslambda_runtime_package.get_installer().get_executable_path()
150 shutil.copy(src_init, target_init)
151 target_init.chmod(0o755)
152 # copy code
153 # create dockerfile
154 docker_file_path = target_path / "Dockerfile"
155 docker_file = LAMBDA_DOCKERFILE.format(
156 base_img=resolver.get_image_for_runtime(function_version.config.runtime),
157 rapid_entrypoint=RAPID_ENTRYPOINT,
158 )
159 with docker_file_path.open(mode="w") as f:
160 f.write(docker_file)
161 try:
162 CONTAINER_CLIENT.build_image(
163 dockerfile_path=str(docker_file_path),
164 image_name=get_image_name_for_function(function_version),
165 )
166 except Exception as e:
167 if LOG.isEnabledFor(logging.DEBUG):
168 LOG.exception(
169 "Error while building prebuilt lambda image for '%s'",
170 function_version.qualified_arn,
171 )
172 else:
173 LOG.error(
174 "Error while building prebuilt lambda image for '%s', Error: %s",
175 function_version.qualified_arn,
176 e,
177 )
178
179
180 @dataclasses.dataclass
181 class LambdaContainerConfiguration(ContainerConfiguration):
182 copy_folders: list[tuple[str, str]] = dataclasses.field(default_factory=list)
183
184
185 class DockerRuntimeExecutor(RuntimeExecutor):
186 ip: Optional[str]
187 executor_endpoint: Optional[ExecutorEndpoint]
188
189 def __init__(
190 self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint
191 ) -> None:
192 super(DockerRuntimeExecutor, self).__init__(
193 id=id, function_version=function_version, service_endpoint=service_endpoint
194 )
195 self.ip = None
196 self.executor_endpoint = self._build_executor_endpoint(service_endpoint)
197
198 def get_image(self) -> str:
199 if not self.function_version.config.runtime:
200 raise NotImplementedError("Custom images are currently not supported")
201 return (
202 get_image_name_for_function(self.function_version)
203 if config.LAMBDA_PREBUILD_IMAGES
204 else resolver.get_image_for_runtime(self.function_version.config.runtime)
205 )
206
207 def _build_executor_endpoint(self, service_endpoint: ServiceEndpoint) -> ExecutorEndpoint:
208 LOG.debug(
209 "Creating service endpoint for function %s executor %s",
210 self.function_version.qualified_arn,
211 self.id,
212 )
213 executor_endpoint = ExecutorEndpoint(self.id, service_endpoint=service_endpoint)
214 LOG.debug(
215 "Finished creating service endpoint for function %s executor %s",
216 self.function_version.qualified_arn,
217 self.id,
218 )
219 return executor_endpoint
220
221 def start(self, env_vars: dict[str, str]) -> None:
222 self.executor_endpoint.start()
223 network = self._get_network_for_executor()
224 container_config = LambdaContainerConfiguration(
225 image_name=None,
226 name=self.id,
227 env_vars=env_vars,
228 network=network,
229 entrypoint=RAPID_ENTRYPOINT,
230 additional_flags=config.LAMBDA_DOCKER_FLAGS,
231 )
232 if self.function_version.config.package_type == PackageType.Zip:
233 if self.function_version.config.code.is_hot_reloading():
234 container_config.env_vars[HOT_RELOADING_ENV_VARIABLE] = "/var/task"
235 if container_config.volumes is None:
236 container_config.volumes = VolumeMappings()
237 container_config.volumes.append(
238 VolumeBind(
239 str(self.function_version.config.code.get_unzipped_code_location()),
240 "/var/task",
241 read_only=True,
242 )
243 )
244 else:
245 container_config.copy_folders.append(
246 (
247 f"{str(self.function_version.config.code.get_unzipped_code_location())}/.",
248 "/var/task",
249 )
250 )
251
252 lambda_hooks.start_docker_executor.run(container_config, self.function_version)
253
254 if not container_config.image_name:
255 container_config.image_name = self.get_image()
256 if config.LAMBDA_DEV_PORT_EXPOSE:
257 self.executor_endpoint.container_port = get_free_tcp_port()
258 if container_config.ports is None:
259 container_config.ports = PortMappings()
260 container_config.ports.add(self.executor_endpoint.container_port, INVOCATION_PORT)
261 CONTAINER_CLIENT.create_container_from_config(container_config)
262 if (
263 not config.LAMBDA_PREBUILD_IMAGES
264 or self.function_version.config.package_type != PackageType.Zip
265 ):
266 CONTAINER_CLIENT.copy_into_container(
267 self.id, f"{str(get_runtime_client_path())}/.", "/"
268 )
269 if not config.LAMBDA_PREBUILD_IMAGES:
270 # copy_folders should be empty here if package type is not zip
271 for source, target in container_config.copy_folders:
272 CONTAINER_CLIENT.copy_into_container(self.id, source, target)
273
274 CONTAINER_CLIENT.start_container(self.id)
275 self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network(
276 container_name_or_id=self.id, container_network=network
277 )
278 if config.LAMBDA_DEV_PORT_EXPOSE:
279 self.ip = "127.0.0.1"
280 self.executor_endpoint.container_address = self.ip
281
282 def stop(self) -> None:
283 CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)
284 if config.LAMBDA_REMOVE_CONTAINERS:
285 CONTAINER_CLIENT.remove_container(container_name=self.id)
286 try:
287 self.executor_endpoint.shutdown()
288 except Exception as e:
289 LOG.debug(
290 "Error while stopping executor endpoint for lambda %s, error: %s",
291 self.function_version.qualified_arn,
292 e,
293 )
294
295 def get_address(self) -> str:
296 if not self.ip:
297 raise LambdaRuntimeException(f"IP address of executor '{self.id}' unknown")
298 return self.ip
299
300 def get_endpoint_from_executor(self) -> str:
301 return get_main_endpoint_from_container()
302
303 def _get_network_for_executor(self) -> str:
304 return get_container_network_for_lambda()
305
306 def invoke(self, payload: Dict[str, str]):
307 LOG.debug(
308 "Sending invoke-payload '%s' to executor '%s'",
309 truncate(json.dumps(payload), config.LAMBDA_TRUNCATE_STDOUT),
310 self.id,
311 )
312 self.executor_endpoint.invoke(payload)
313
314 @classmethod
315 def prepare_version(cls, function_version: FunctionVersion) -> None:
316 time_before = time.perf_counter()
317 lambda_hooks.prepare_docker_executor.run(function_version)
318 if function_version.config.code:
319 function_version.config.code.prepare_for_execution()
320 image_name = resolver.get_image_for_runtime(function_version.config.runtime)
321 if image_name not in PULLED_IMAGES:
322 CONTAINER_CLIENT.pull_image(image_name)
323 PULLED_IMAGES.add(image_name)
324 if config.LAMBDA_PREBUILD_IMAGES:
325 target_path = function_version.config.code.get_unzipped_code_location()
326 prepare_image(target_path, function_version)
327 LOG.debug(
328 "Version preparation of version %s took %0.2fms",
329 function_version.qualified_arn,
330 (time.perf_counter() - time_before) * 1000,
331 )
332
333 @classmethod
334 def cleanup_version(cls, function_version: FunctionVersion) -> None:
335 if config.LAMBDA_PREBUILD_IMAGES:
336 CONTAINER_CLIENT.remove_image(get_image_name_for_function(function_version))
337
338 def get_runtime_endpoint(self) -> str:
339 return f"http://{self.get_endpoint_from_executor()}:{config.EDGE_PORT}{self.executor_endpoint.get_endpoint_prefix()}"
340
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/awslambda/invocation/docker_runtime_executor.py b/localstack/services/awslambda/invocation/docker_runtime_executor.py
--- a/localstack/services/awslambda/invocation/docker_runtime_executor.py
+++ b/localstack/services/awslambda/invocation/docker_runtime_executor.py
@@ -24,6 +24,7 @@
get_main_endpoint_from_container,
)
from localstack.services.awslambda.packages import awslambda_runtime_package
+from localstack.utils.container_networking import get_main_container_name
from localstack.utils.container_utils.container_client import (
ContainerConfiguration,
PortMappings,
@@ -185,6 +186,7 @@
class DockerRuntimeExecutor(RuntimeExecutor):
ip: Optional[str]
executor_endpoint: Optional[ExecutorEndpoint]
+ container_name: str
def __init__(
self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint
@@ -194,6 +196,8 @@
)
self.ip = None
self.executor_endpoint = self._build_executor_endpoint(service_endpoint)
+ self.container_name = self._generate_container_name()
+ LOG.debug("Assigning container name of %s to executor %s", self.container_name, self.id)
def get_image(self) -> str:
if not self.function_version.config.runtime:
@@ -218,12 +222,26 @@
)
return executor_endpoint
+ def _generate_container_name(self):
+ """
+ Format <main-container-name>-lambda-<function-name>-<executor-id>
+ TODO: make the format configurable
+ """
+ container_name = "-".join(
+ [
+ get_main_container_name() or "localstack",
+ "lambda",
+ self.function_version.id.function_name.lower(),
+ ]
+ ).replace("_", "-")
+ return f"{container_name}-{self.id}"
+
def start(self, env_vars: dict[str, str]) -> None:
self.executor_endpoint.start()
network = self._get_network_for_executor()
container_config = LambdaContainerConfiguration(
image_name=None,
- name=self.id,
+ name=self.container_name,
env_vars=env_vars,
network=network,
entrypoint=RAPID_ENTRYPOINT,
@@ -264,25 +282,25 @@
or self.function_version.config.package_type != PackageType.Zip
):
CONTAINER_CLIENT.copy_into_container(
- self.id, f"{str(get_runtime_client_path())}/.", "/"
+ self.container_name, f"{str(get_runtime_client_path())}/.", "/"
)
if not config.LAMBDA_PREBUILD_IMAGES:
# copy_folders should be empty here if package type is not zip
for source, target in container_config.copy_folders:
- CONTAINER_CLIENT.copy_into_container(self.id, source, target)
+ CONTAINER_CLIENT.copy_into_container(self.container_name, source, target)
- CONTAINER_CLIENT.start_container(self.id)
+ CONTAINER_CLIENT.start_container(self.container_name)
self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network(
- container_name_or_id=self.id, container_network=network
+ container_name_or_id=self.container_name, container_network=network
)
if config.LAMBDA_DEV_PORT_EXPOSE:
self.ip = "127.0.0.1"
self.executor_endpoint.container_address = self.ip
def stop(self) -> None:
- CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)
+ CONTAINER_CLIENT.stop_container(container_name=self.container_name, timeout=5)
if config.LAMBDA_REMOVE_CONTAINERS:
- CONTAINER_CLIENT.remove_container(container_name=self.id)
+ CONTAINER_CLIENT.remove_container(container_name=self.container_name)
try:
self.executor_endpoint.shutdown()
except Exception as e:
| {"golden_diff": "diff --git a/localstack/services/awslambda/invocation/docker_runtime_executor.py b/localstack/services/awslambda/invocation/docker_runtime_executor.py\n--- a/localstack/services/awslambda/invocation/docker_runtime_executor.py\n+++ b/localstack/services/awslambda/invocation/docker_runtime_executor.py\n@@ -24,6 +24,7 @@\n get_main_endpoint_from_container,\n )\n from localstack.services.awslambda.packages import awslambda_runtime_package\n+from localstack.utils.container_networking import get_main_container_name\n from localstack.utils.container_utils.container_client import (\n ContainerConfiguration,\n PortMappings,\n@@ -185,6 +186,7 @@\n class DockerRuntimeExecutor(RuntimeExecutor):\n ip: Optional[str]\n executor_endpoint: Optional[ExecutorEndpoint]\n+ container_name: str\n \n def __init__(\n self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint\n@@ -194,6 +196,8 @@\n )\n self.ip = None\n self.executor_endpoint = self._build_executor_endpoint(service_endpoint)\n+ self.container_name = self._generate_container_name()\n+ LOG.debug(\"Assigning container name of %s to executor %s\", self.container_name, self.id)\n \n def get_image(self) -> str:\n if not self.function_version.config.runtime:\n@@ -218,12 +222,26 @@\n )\n return executor_endpoint\n \n+ def _generate_container_name(self):\n+ \"\"\"\n+ Format <main-container-name>-lambda-<function-name>-<executor-id>\n+ TODO: make the format configurable\n+ \"\"\"\n+ container_name = \"-\".join(\n+ [\n+ get_main_container_name() or \"localstack\",\n+ \"lambda\",\n+ self.function_version.id.function_name.lower(),\n+ ]\n+ ).replace(\"_\", \"-\")\n+ return f\"{container_name}-{self.id}\"\n+\n def start(self, env_vars: dict[str, str]) -> None:\n self.executor_endpoint.start()\n network = self._get_network_for_executor()\n container_config = LambdaContainerConfiguration(\n image_name=None,\n- name=self.id,\n+ name=self.container_name,\n env_vars=env_vars,\n network=network,\n entrypoint=RAPID_ENTRYPOINT,\n@@ -264,25 +282,25 @@\n or self.function_version.config.package_type != PackageType.Zip\n ):\n CONTAINER_CLIENT.copy_into_container(\n- self.id, f\"{str(get_runtime_client_path())}/.\", \"/\"\n+ self.container_name, f\"{str(get_runtime_client_path())}/.\", \"/\"\n )\n if not config.LAMBDA_PREBUILD_IMAGES:\n # copy_folders should be empty here if package type is not zip\n for source, target in container_config.copy_folders:\n- CONTAINER_CLIENT.copy_into_container(self.id, source, target)\n+ CONTAINER_CLIENT.copy_into_container(self.container_name, source, target)\n \n- CONTAINER_CLIENT.start_container(self.id)\n+ CONTAINER_CLIENT.start_container(self.container_name)\n self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network(\n- container_name_or_id=self.id, container_network=network\n+ container_name_or_id=self.container_name, container_network=network\n )\n if config.LAMBDA_DEV_PORT_EXPOSE:\n self.ip = \"127.0.0.1\"\n self.executor_endpoint.container_address = self.ip\n \n def stop(self) -> None:\n- CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)\n+ CONTAINER_CLIENT.stop_container(container_name=self.container_name, timeout=5)\n if config.LAMBDA_REMOVE_CONTAINERS:\n- CONTAINER_CLIENT.remove_container(container_name=self.id)\n+ CONTAINER_CLIENT.remove_container(container_name=self.container_name)\n try:\n self.executor_endpoint.shutdown()\n except Exception as e:\n", "issue": "feature request: PROVIDER_OVERRIDE_LAMBDA=asf, Lambda container prefix\n### Is there an existing issue for 
this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Feature description\r\n\r\nWe're trying to switch from the old lambda implementation to asf. In the old lambda implementation, when using `LAMBDA_EXECUTOR=docker-reuse`, the lambda container name received a prefix that was equal to that of the local stack container in docker. (This was not the case for all `LAMBDA_EXECUTOR` flavours)\r\n\r\nThis way, we were able to track the lambda containers that were started for a given localstack instance, print the logs, kill them when necessary.\r\n\r\nRequest: Would it be possible to prefix the lambda containers with the name of the local stack container itself if present?\r\n\r\n### \ud83e\uddd1\u200d\ud83d\udcbb Implementation\r\n\r\n_No response_\r\n\r\n### Anything else?\r\n\r\n_No response_\n", "before_files": [{"content": "import dataclasses\nimport json\nimport logging\nimport shutil\nimport time\nfrom pathlib import Path\nfrom typing import Callable, Dict, Literal, Optional\n\nfrom localstack import config\nfrom localstack.aws.api.lambda_ import PackageType, Runtime\nfrom localstack.services.awslambda import hooks as lambda_hooks\nfrom localstack.services.awslambda.invocation.executor_endpoint import (\n INVOCATION_PORT,\n ExecutorEndpoint,\n ServiceEndpoint,\n)\nfrom localstack.services.awslambda.invocation.lambda_models import IMAGE_MAPPING, FunctionVersion\nfrom localstack.services.awslambda.invocation.runtime_executor import (\n LambdaRuntimeException,\n RuntimeExecutor,\n)\nfrom localstack.services.awslambda.lambda_utils import (\n get_container_network_for_lambda,\n get_main_endpoint_from_container,\n)\nfrom localstack.services.awslambda.packages import awslambda_runtime_package\nfrom localstack.utils.container_utils.container_client import (\n ContainerConfiguration,\n PortMappings,\n VolumeBind,\n VolumeMappings,\n)\nfrom localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT\nfrom localstack.utils.net import get_free_tcp_port\nfrom localstack.utils.strings import truncate\n\nLOG = logging.getLogger(__name__)\n\nIMAGE_PREFIX = \"public.ecr.aws/lambda/\"\n# IMAGE_PREFIX = \"amazon/aws-lambda-\"\n\nRAPID_ENTRYPOINT = \"/var/rapid/init\"\n\nInitializationType = Literal[\"on-demand\", \"provisioned-concurrency\"]\n\nLAMBDA_DOCKERFILE = \"\"\"FROM {base_img}\nCOPY aws-lambda-rie {rapid_entrypoint}\nCOPY code/ /var/task\n\"\"\"\n\nPULLED_IMAGES: set[str] = set()\n\nHOT_RELOADING_ENV_VARIABLE = \"LOCALSTACK_HOT_RELOADING_PATHS\"\n\n\ndef get_image_name_for_function(function_version: FunctionVersion) -> str:\n return f\"localstack/lambda-{function_version.id.qualified_arn().replace(':', '_').replace('$', '_').lower()}\"\n\n\ndef get_default_image_for_runtime(runtime: str) -> str:\n postfix = IMAGE_MAPPING.get(runtime)\n if not postfix:\n raise ValueError(f\"Unsupported runtime {runtime}!\")\n return f\"{IMAGE_PREFIX}{postfix}\"\n\n\nclass RuntimeImageResolver:\n \"\"\"\n Resolves Lambda runtimes to corresponding docker images\n The default behavior resolves based on a prefix (including the repository) and a suffix (per runtime).\n\n This can be customized via the LAMBDA_RUNTIME_IMAGE_MAPPING config in 2 distinct ways:\n\n Option A: use a pattern string for the config variable that includes the \"<runtime>\" string\n e.g. \"myrepo/lambda:<runtime>-custom\" would resolve the runtime \"python3.9\" to \"myrepo/lambda:python3.9-custom\"\n\n Option B: use a JSON dict string for the config variable, mapping the runtime to the full image name & tag\n e.g. 
{\"python3.9\": \"myrepo/lambda:python3.9-custom\", \"python3.8\": \"myotherrepo/pylambda:3.8\"}\n\n Note that with Option B this will only apply to the runtimes included in the dict.\n All other (non-included) runtimes will fall back to the default behavior.\n \"\"\"\n\n _mapping: dict[Runtime, str]\n _default_resolve_fn: Callable[[Runtime], str]\n\n def __init__(\n self, default_resolve_fn: Callable[[Runtime], str] = get_default_image_for_runtime\n ):\n self._mapping = dict()\n self._default_resolve_fn = default_resolve_fn\n\n def _resolve(self, runtime: Runtime, custom_image_mapping: str = \"\") -> str:\n if runtime not in IMAGE_MAPPING:\n raise ValueError(f\"Unsupported runtime {runtime}\")\n\n if not custom_image_mapping:\n return self._default_resolve_fn(runtime)\n\n # Option A (pattern string that includes <runtime> to replace)\n if \"<runtime>\" in custom_image_mapping:\n return custom_image_mapping.replace(\"<runtime>\", runtime)\n\n # Option B (json dict mapping with fallback)\n try:\n mapping: dict = json.loads(custom_image_mapping)\n # at this point we're loading the whole dict to avoid parsing multiple times\n for k, v in mapping.items():\n if k not in IMAGE_MAPPING:\n raise ValueError(\n f\"Unsupported runtime ({runtime}) provided in LAMBDA_RUNTIME_IMAGE_MAPPING\"\n )\n self._mapping[k] = v\n\n if runtime in self._mapping:\n return self._mapping[runtime]\n\n # fall back to default behavior if the runtime was not present in the custom config\n return self._default_resolve_fn(runtime)\n\n except Exception:\n LOG.error(\n f\"Failed to load config from LAMBDA_RUNTIME_IMAGE_MAPPING={custom_image_mapping}\"\n )\n raise # TODO: validate config at start and prevent startup\n\n def get_image_for_runtime(self, runtime: Runtime) -> str:\n if runtime not in self._mapping:\n resolved_image = self._resolve(runtime, config.LAMBDA_RUNTIME_IMAGE_MAPPING)\n self._mapping[runtime] = resolved_image\n\n return self._mapping[runtime]\n\n\nresolver = RuntimeImageResolver()\n\n\ndef get_runtime_client_path() -> Path:\n installer = awslambda_runtime_package.get_installer()\n installer.install()\n return Path(installer.get_installed_dir())\n\n\ndef prepare_image(target_path: Path, function_version: FunctionVersion) -> None:\n if not function_version.config.runtime:\n raise NotImplementedError(\"Custom images are currently not supported\")\n src_init = get_runtime_client_path()\n # copy init file\n target_init = awslambda_runtime_package.get_installer().get_executable_path()\n shutil.copy(src_init, target_init)\n target_init.chmod(0o755)\n # copy code\n # create dockerfile\n docker_file_path = target_path / \"Dockerfile\"\n docker_file = LAMBDA_DOCKERFILE.format(\n base_img=resolver.get_image_for_runtime(function_version.config.runtime),\n rapid_entrypoint=RAPID_ENTRYPOINT,\n )\n with docker_file_path.open(mode=\"w\") as f:\n f.write(docker_file)\n try:\n CONTAINER_CLIENT.build_image(\n dockerfile_path=str(docker_file_path),\n image_name=get_image_name_for_function(function_version),\n )\n except Exception as e:\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.exception(\n \"Error while building prebuilt lambda image for '%s'\",\n function_version.qualified_arn,\n )\n else:\n LOG.error(\n \"Error while building prebuilt lambda image for '%s', Error: %s\",\n function_version.qualified_arn,\n e,\n )\n\n\[email protected]\nclass LambdaContainerConfiguration(ContainerConfiguration):\n copy_folders: list[tuple[str, str]] = dataclasses.field(default_factory=list)\n\n\nclass 
DockerRuntimeExecutor(RuntimeExecutor):\n ip: Optional[str]\n executor_endpoint: Optional[ExecutorEndpoint]\n\n def __init__(\n self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint\n ) -> None:\n super(DockerRuntimeExecutor, self).__init__(\n id=id, function_version=function_version, service_endpoint=service_endpoint\n )\n self.ip = None\n self.executor_endpoint = self._build_executor_endpoint(service_endpoint)\n\n def get_image(self) -> str:\n if not self.function_version.config.runtime:\n raise NotImplementedError(\"Custom images are currently not supported\")\n return (\n get_image_name_for_function(self.function_version)\n if config.LAMBDA_PREBUILD_IMAGES\n else resolver.get_image_for_runtime(self.function_version.config.runtime)\n )\n\n def _build_executor_endpoint(self, service_endpoint: ServiceEndpoint) -> ExecutorEndpoint:\n LOG.debug(\n \"Creating service endpoint for function %s executor %s\",\n self.function_version.qualified_arn,\n self.id,\n )\n executor_endpoint = ExecutorEndpoint(self.id, service_endpoint=service_endpoint)\n LOG.debug(\n \"Finished creating service endpoint for function %s executor %s\",\n self.function_version.qualified_arn,\n self.id,\n )\n return executor_endpoint\n\n def start(self, env_vars: dict[str, str]) -> None:\n self.executor_endpoint.start()\n network = self._get_network_for_executor()\n container_config = LambdaContainerConfiguration(\n image_name=None,\n name=self.id,\n env_vars=env_vars,\n network=network,\n entrypoint=RAPID_ENTRYPOINT,\n additional_flags=config.LAMBDA_DOCKER_FLAGS,\n )\n if self.function_version.config.package_type == PackageType.Zip:\n if self.function_version.config.code.is_hot_reloading():\n container_config.env_vars[HOT_RELOADING_ENV_VARIABLE] = \"/var/task\"\n if container_config.volumes is None:\n container_config.volumes = VolumeMappings()\n container_config.volumes.append(\n VolumeBind(\n str(self.function_version.config.code.get_unzipped_code_location()),\n \"/var/task\",\n read_only=True,\n )\n )\n else:\n container_config.copy_folders.append(\n (\n f\"{str(self.function_version.config.code.get_unzipped_code_location())}/.\",\n \"/var/task\",\n )\n )\n\n lambda_hooks.start_docker_executor.run(container_config, self.function_version)\n\n if not container_config.image_name:\n container_config.image_name = self.get_image()\n if config.LAMBDA_DEV_PORT_EXPOSE:\n self.executor_endpoint.container_port = get_free_tcp_port()\n if container_config.ports is None:\n container_config.ports = PortMappings()\n container_config.ports.add(self.executor_endpoint.container_port, INVOCATION_PORT)\n CONTAINER_CLIENT.create_container_from_config(container_config)\n if (\n not config.LAMBDA_PREBUILD_IMAGES\n or self.function_version.config.package_type != PackageType.Zip\n ):\n CONTAINER_CLIENT.copy_into_container(\n self.id, f\"{str(get_runtime_client_path())}/.\", \"/\"\n )\n if not config.LAMBDA_PREBUILD_IMAGES:\n # copy_folders should be empty here if package type is not zip\n for source, target in container_config.copy_folders:\n CONTAINER_CLIENT.copy_into_container(self.id, source, target)\n\n CONTAINER_CLIENT.start_container(self.id)\n self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network(\n container_name_or_id=self.id, container_network=network\n )\n if config.LAMBDA_DEV_PORT_EXPOSE:\n self.ip = \"127.0.0.1\"\n self.executor_endpoint.container_address = self.ip\n\n def stop(self) -> None:\n CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)\n if config.LAMBDA_REMOVE_CONTAINERS:\n 
CONTAINER_CLIENT.remove_container(container_name=self.id)\n try:\n self.executor_endpoint.shutdown()\n except Exception as e:\n LOG.debug(\n \"Error while stopping executor endpoint for lambda %s, error: %s\",\n self.function_version.qualified_arn,\n e,\n )\n\n def get_address(self) -> str:\n if not self.ip:\n raise LambdaRuntimeException(f\"IP address of executor '{self.id}' unknown\")\n return self.ip\n\n def get_endpoint_from_executor(self) -> str:\n return get_main_endpoint_from_container()\n\n def _get_network_for_executor(self) -> str:\n return get_container_network_for_lambda()\n\n def invoke(self, payload: Dict[str, str]):\n LOG.debug(\n \"Sending invoke-payload '%s' to executor '%s'\",\n truncate(json.dumps(payload), config.LAMBDA_TRUNCATE_STDOUT),\n self.id,\n )\n self.executor_endpoint.invoke(payload)\n\n @classmethod\n def prepare_version(cls, function_version: FunctionVersion) -> None:\n time_before = time.perf_counter()\n lambda_hooks.prepare_docker_executor.run(function_version)\n if function_version.config.code:\n function_version.config.code.prepare_for_execution()\n image_name = resolver.get_image_for_runtime(function_version.config.runtime)\n if image_name not in PULLED_IMAGES:\n CONTAINER_CLIENT.pull_image(image_name)\n PULLED_IMAGES.add(image_name)\n if config.LAMBDA_PREBUILD_IMAGES:\n target_path = function_version.config.code.get_unzipped_code_location()\n prepare_image(target_path, function_version)\n LOG.debug(\n \"Version preparation of version %s took %0.2fms\",\n function_version.qualified_arn,\n (time.perf_counter() - time_before) * 1000,\n )\n\n @classmethod\n def cleanup_version(cls, function_version: FunctionVersion) -> None:\n if config.LAMBDA_PREBUILD_IMAGES:\n CONTAINER_CLIENT.remove_image(get_image_name_for_function(function_version))\n\n def get_runtime_endpoint(self) -> str:\n return f\"http://{self.get_endpoint_from_executor()}:{config.EDGE_PORT}{self.executor_endpoint.get_endpoint_prefix()}\"\n", "path": "localstack/services/awslambda/invocation/docker_runtime_executor.py"}], "after_files": [{"content": "import dataclasses\nimport json\nimport logging\nimport shutil\nimport time\nfrom pathlib import Path\nfrom typing import Callable, Dict, Literal, Optional\n\nfrom localstack import config\nfrom localstack.aws.api.lambda_ import PackageType, Runtime\nfrom localstack.services.awslambda import hooks as lambda_hooks\nfrom localstack.services.awslambda.invocation.executor_endpoint import (\n INVOCATION_PORT,\n ExecutorEndpoint,\n ServiceEndpoint,\n)\nfrom localstack.services.awslambda.invocation.lambda_models import IMAGE_MAPPING, FunctionVersion\nfrom localstack.services.awslambda.invocation.runtime_executor import (\n LambdaRuntimeException,\n RuntimeExecutor,\n)\nfrom localstack.services.awslambda.lambda_utils import (\n get_container_network_for_lambda,\n get_main_endpoint_from_container,\n)\nfrom localstack.services.awslambda.packages import awslambda_runtime_package\nfrom localstack.utils.container_networking import get_main_container_name\nfrom localstack.utils.container_utils.container_client import (\n ContainerConfiguration,\n PortMappings,\n VolumeBind,\n VolumeMappings,\n)\nfrom localstack.utils.docker_utils import DOCKER_CLIENT as CONTAINER_CLIENT\nfrom localstack.utils.net import get_free_tcp_port\nfrom localstack.utils.strings import truncate\n\nLOG = logging.getLogger(__name__)\n\nIMAGE_PREFIX = \"public.ecr.aws/lambda/\"\n# IMAGE_PREFIX = \"amazon/aws-lambda-\"\n\nRAPID_ENTRYPOINT = \"/var/rapid/init\"\n\nInitializationType = 
Literal[\"on-demand\", \"provisioned-concurrency\"]\n\nLAMBDA_DOCKERFILE = \"\"\"FROM {base_img}\nCOPY aws-lambda-rie {rapid_entrypoint}\nCOPY code/ /var/task\n\"\"\"\n\nPULLED_IMAGES: set[str] = set()\n\nHOT_RELOADING_ENV_VARIABLE = \"LOCALSTACK_HOT_RELOADING_PATHS\"\n\n\ndef get_image_name_for_function(function_version: FunctionVersion) -> str:\n return f\"localstack/lambda-{function_version.id.qualified_arn().replace(':', '_').replace('$', '_').lower()}\"\n\n\ndef get_default_image_for_runtime(runtime: str) -> str:\n postfix = IMAGE_MAPPING.get(runtime)\n if not postfix:\n raise ValueError(f\"Unsupported runtime {runtime}!\")\n return f\"{IMAGE_PREFIX}{postfix}\"\n\n\nclass RuntimeImageResolver:\n \"\"\"\n Resolves Lambda runtimes to corresponding docker images\n The default behavior resolves based on a prefix (including the repository) and a suffix (per runtime).\n\n This can be customized via the LAMBDA_RUNTIME_IMAGE_MAPPING config in 2 distinct ways:\n\n Option A: use a pattern string for the config variable that includes the \"<runtime>\" string\n e.g. \"myrepo/lambda:<runtime>-custom\" would resolve the runtime \"python3.9\" to \"myrepo/lambda:python3.9-custom\"\n\n Option B: use a JSON dict string for the config variable, mapping the runtime to the full image name & tag\n e.g. {\"python3.9\": \"myrepo/lambda:python3.9-custom\", \"python3.8\": \"myotherrepo/pylambda:3.8\"}\n\n Note that with Option B this will only apply to the runtimes included in the dict.\n All other (non-included) runtimes will fall back to the default behavior.\n \"\"\"\n\n _mapping: dict[Runtime, str]\n _default_resolve_fn: Callable[[Runtime], str]\n\n def __init__(\n self, default_resolve_fn: Callable[[Runtime], str] = get_default_image_for_runtime\n ):\n self._mapping = dict()\n self._default_resolve_fn = default_resolve_fn\n\n def _resolve(self, runtime: Runtime, custom_image_mapping: str = \"\") -> str:\n if runtime not in IMAGE_MAPPING:\n raise ValueError(f\"Unsupported runtime {runtime}\")\n\n if not custom_image_mapping:\n return self._default_resolve_fn(runtime)\n\n # Option A (pattern string that includes <runtime> to replace)\n if \"<runtime>\" in custom_image_mapping:\n return custom_image_mapping.replace(\"<runtime>\", runtime)\n\n # Option B (json dict mapping with fallback)\n try:\n mapping: dict = json.loads(custom_image_mapping)\n # at this point we're loading the whole dict to avoid parsing multiple times\n for k, v in mapping.items():\n if k not in IMAGE_MAPPING:\n raise ValueError(\n f\"Unsupported runtime ({runtime}) provided in LAMBDA_RUNTIME_IMAGE_MAPPING\"\n )\n self._mapping[k] = v\n\n if runtime in self._mapping:\n return self._mapping[runtime]\n\n # fall back to default behavior if the runtime was not present in the custom config\n return self._default_resolve_fn(runtime)\n\n except Exception:\n LOG.error(\n f\"Failed to load config from LAMBDA_RUNTIME_IMAGE_MAPPING={custom_image_mapping}\"\n )\n raise # TODO: validate config at start and prevent startup\n\n def get_image_for_runtime(self, runtime: Runtime) -> str:\n if runtime not in self._mapping:\n resolved_image = self._resolve(runtime, config.LAMBDA_RUNTIME_IMAGE_MAPPING)\n self._mapping[runtime] = resolved_image\n\n return self._mapping[runtime]\n\n\nresolver = RuntimeImageResolver()\n\n\ndef get_runtime_client_path() -> Path:\n installer = awslambda_runtime_package.get_installer()\n installer.install()\n return Path(installer.get_installed_dir())\n\n\ndef prepare_image(target_path: Path, function_version: FunctionVersion) -> 
None:\n if not function_version.config.runtime:\n raise NotImplementedError(\"Custom images are currently not supported\")\n src_init = get_runtime_client_path()\n # copy init file\n target_init = awslambda_runtime_package.get_installer().get_executable_path()\n shutil.copy(src_init, target_init)\n target_init.chmod(0o755)\n # copy code\n # create dockerfile\n docker_file_path = target_path / \"Dockerfile\"\n docker_file = LAMBDA_DOCKERFILE.format(\n base_img=resolver.get_image_for_runtime(function_version.config.runtime),\n rapid_entrypoint=RAPID_ENTRYPOINT,\n )\n with docker_file_path.open(mode=\"w\") as f:\n f.write(docker_file)\n try:\n CONTAINER_CLIENT.build_image(\n dockerfile_path=str(docker_file_path),\n image_name=get_image_name_for_function(function_version),\n )\n except Exception as e:\n if LOG.isEnabledFor(logging.DEBUG):\n LOG.exception(\n \"Error while building prebuilt lambda image for '%s'\",\n function_version.qualified_arn,\n )\n else:\n LOG.error(\n \"Error while building prebuilt lambda image for '%s', Error: %s\",\n function_version.qualified_arn,\n e,\n )\n\n\[email protected]\nclass LambdaContainerConfiguration(ContainerConfiguration):\n copy_folders: list[tuple[str, str]] = dataclasses.field(default_factory=list)\n\n\nclass DockerRuntimeExecutor(RuntimeExecutor):\n ip: Optional[str]\n executor_endpoint: Optional[ExecutorEndpoint]\n container_name: str\n\n def __init__(\n self, id: str, function_version: FunctionVersion, service_endpoint: ServiceEndpoint\n ) -> None:\n super(DockerRuntimeExecutor, self).__init__(\n id=id, function_version=function_version, service_endpoint=service_endpoint\n )\n self.ip = None\n self.executor_endpoint = self._build_executor_endpoint(service_endpoint)\n self.container_name = self._generate_container_name()\n LOG.debug(\"Assigning container name of %s to executor %s\", self.container_name, self.id)\n\n def get_image(self) -> str:\n if not self.function_version.config.runtime:\n raise NotImplementedError(\"Custom images are currently not supported\")\n return (\n get_image_name_for_function(self.function_version)\n if config.LAMBDA_PREBUILD_IMAGES\n else resolver.get_image_for_runtime(self.function_version.config.runtime)\n )\n\n def _build_executor_endpoint(self, service_endpoint: ServiceEndpoint) -> ExecutorEndpoint:\n LOG.debug(\n \"Creating service endpoint for function %s executor %s\",\n self.function_version.qualified_arn,\n self.id,\n )\n executor_endpoint = ExecutorEndpoint(self.id, service_endpoint=service_endpoint)\n LOG.debug(\n \"Finished creating service endpoint for function %s executor %s\",\n self.function_version.qualified_arn,\n self.id,\n )\n return executor_endpoint\n\n def _generate_container_name(self):\n \"\"\"\n Format <main-container-name>-lambda-<function-name>-<executor-id>\n TODO: make the format configurable\n \"\"\"\n container_name = \"-\".join(\n [\n get_main_container_name() or \"localstack\",\n \"lambda\",\n self.function_version.id.function_name.lower(),\n ]\n ).replace(\"_\", \"-\")\n return f\"{container_name}-{self.id}\"\n\n def start(self, env_vars: dict[str, str]) -> None:\n self.executor_endpoint.start()\n network = self._get_network_for_executor()\n container_config = LambdaContainerConfiguration(\n image_name=None,\n name=self.container_name,\n env_vars=env_vars,\n network=network,\n entrypoint=RAPID_ENTRYPOINT,\n additional_flags=config.LAMBDA_DOCKER_FLAGS,\n )\n if self.function_version.config.package_type == PackageType.Zip:\n if self.function_version.config.code.is_hot_reloading():\n 
container_config.env_vars[HOT_RELOADING_ENV_VARIABLE] = \"/var/task\"\n if container_config.volumes is None:\n container_config.volumes = VolumeMappings()\n container_config.volumes.append(\n VolumeBind(\n str(self.function_version.config.code.get_unzipped_code_location()),\n \"/var/task\",\n read_only=True,\n )\n )\n else:\n container_config.copy_folders.append(\n (\n f\"{str(self.function_version.config.code.get_unzipped_code_location())}/.\",\n \"/var/task\",\n )\n )\n\n lambda_hooks.start_docker_executor.run(container_config, self.function_version)\n\n if not container_config.image_name:\n container_config.image_name = self.get_image()\n if config.LAMBDA_DEV_PORT_EXPOSE:\n self.executor_endpoint.container_port = get_free_tcp_port()\n if container_config.ports is None:\n container_config.ports = PortMappings()\n container_config.ports.add(self.executor_endpoint.container_port, INVOCATION_PORT)\n CONTAINER_CLIENT.create_container_from_config(container_config)\n if (\n not config.LAMBDA_PREBUILD_IMAGES\n or self.function_version.config.package_type != PackageType.Zip\n ):\n CONTAINER_CLIENT.copy_into_container(\n self.container_name, f\"{str(get_runtime_client_path())}/.\", \"/\"\n )\n if not config.LAMBDA_PREBUILD_IMAGES:\n # copy_folders should be empty here if package type is not zip\n for source, target in container_config.copy_folders:\n CONTAINER_CLIENT.copy_into_container(self.container_name, source, target)\n\n CONTAINER_CLIENT.start_container(self.container_name)\n self.ip = CONTAINER_CLIENT.get_container_ipv4_for_network(\n container_name_or_id=self.container_name, container_network=network\n )\n if config.LAMBDA_DEV_PORT_EXPOSE:\n self.ip = \"127.0.0.1\"\n self.executor_endpoint.container_address = self.ip\n\n def stop(self) -> None:\n CONTAINER_CLIENT.stop_container(container_name=self.container_name, timeout=5)\n if config.LAMBDA_REMOVE_CONTAINERS:\n CONTAINER_CLIENT.remove_container(container_name=self.container_name)\n try:\n self.executor_endpoint.shutdown()\n except Exception as e:\n LOG.debug(\n \"Error while stopping executor endpoint for lambda %s, error: %s\",\n self.function_version.qualified_arn,\n e,\n )\n\n def get_address(self) -> str:\n if not self.ip:\n raise LambdaRuntimeException(f\"IP address of executor '{self.id}' unknown\")\n return self.ip\n\n def get_endpoint_from_executor(self) -> str:\n return get_main_endpoint_from_container()\n\n def _get_network_for_executor(self) -> str:\n return get_container_network_for_lambda()\n\n def invoke(self, payload: Dict[str, str]):\n LOG.debug(\n \"Sending invoke-payload '%s' to executor '%s'\",\n truncate(json.dumps(payload), config.LAMBDA_TRUNCATE_STDOUT),\n self.id,\n )\n self.executor_endpoint.invoke(payload)\n\n @classmethod\n def prepare_version(cls, function_version: FunctionVersion) -> None:\n time_before = time.perf_counter()\n lambda_hooks.prepare_docker_executor.run(function_version)\n if function_version.config.code:\n function_version.config.code.prepare_for_execution()\n image_name = resolver.get_image_for_runtime(function_version.config.runtime)\n if image_name not in PULLED_IMAGES:\n CONTAINER_CLIENT.pull_image(image_name)\n PULLED_IMAGES.add(image_name)\n if config.LAMBDA_PREBUILD_IMAGES:\n target_path = function_version.config.code.get_unzipped_code_location()\n prepare_image(target_path, function_version)\n LOG.debug(\n \"Version preparation of version %s took %0.2fms\",\n function_version.qualified_arn,\n (time.perf_counter() - time_before) * 1000,\n )\n\n @classmethod\n def cleanup_version(cls, 
function_version: FunctionVersion) -> None:\n if config.LAMBDA_PREBUILD_IMAGES:\n CONTAINER_CLIENT.remove_image(get_image_name_for_function(function_version))\n\n def get_runtime_endpoint(self) -> str:\n return f\"http://{self.get_endpoint_from_executor()}:{config.EDGE_PORT}{self.executor_endpoint.get_endpoint_prefix()}\"\n", "path": "localstack/services/awslambda/invocation/docker_runtime_executor.py"}]} | 4,091 | 836 |
gh_patches_debug_44041 | rasdani/github-patches | git_diff | pypi__warehouse-11122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add caveats to macaroons for expiration (time) and version
**What's the problem this feature will solve?**
This will allow further attenuating the permissions granted by an API key
**Describe the solution you'd like**
Addition of two additional types of caveat, project version (for uploads) and time (expiry).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/macaroons/caveats.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import json
14
15 import pymacaroons
16
17 from warehouse.packaging.models import Project
18
19
20 class InvalidMacaroonError(Exception):
21 ...
22
23
24 class Caveat:
25 def __init__(self, verifier):
26 self.verifier = verifier
27
28 def verify(self, predicate):
29 raise InvalidMacaroonError
30
31 def __call__(self, predicate):
32 return self.verify(predicate)
33
34
35 class V1Caveat(Caveat):
36 def verify_projects(self, projects):
37 # First, ensure that we're actually operating in
38 # the context of a package.
39 if not isinstance(self.verifier.context, Project):
40 raise InvalidMacaroonError(
41 "project-scoped token used outside of a project context"
42 )
43
44 project = self.verifier.context
45 if project.normalized_name in projects:
46 return True
47
48 raise InvalidMacaroonError(
49 f"project-scoped token is not valid for project '{project.name}'"
50 )
51
52 def verify(self, predicate):
53 try:
54 data = json.loads(predicate)
55 except ValueError:
56 raise InvalidMacaroonError("malformatted predicate")
57
58 if data.get("version") != 1:
59 raise InvalidMacaroonError("invalidate version in predicate")
60
61 permissions = data.get("permissions")
62 if permissions is None:
63 raise InvalidMacaroonError("invalid permissions in predicate")
64
65 if permissions == "user":
66 # User-scoped tokens behave exactly like a user's normal credentials.
67 return True
68
69 projects = permissions.get("projects")
70 if projects is None:
71 raise InvalidMacaroonError("invalid projects in predicate")
72
73 return self.verify_projects(projects)
74
75
76 class Verifier:
77 def __init__(self, macaroon, context, principals, permission):
78 self.macaroon = macaroon
79 self.context = context
80 self.principals = principals
81 self.permission = permission
82 self.verifier = pymacaroons.Verifier()
83
84 def verify(self, key):
85 self.verifier.satisfy_general(V1Caveat(self))
86
87 try:
88 return self.verifier.verify(self.macaroon, key)
89 except (
90 pymacaroons.exceptions.MacaroonInvalidSignatureException,
91 Exception, # https://github.com/ecordell/pymacaroons/issues/50
92 ):
93 raise InvalidMacaroonError("invalid macaroon signature")
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/macaroons/caveats.py b/warehouse/macaroons/caveats.py
--- a/warehouse/macaroons/caveats.py
+++ b/warehouse/macaroons/caveats.py
@@ -11,6 +11,7 @@
# limitations under the License.
import json
+import time
import pymacaroons
@@ -24,43 +25,51 @@
class Caveat:
def __init__(self, verifier):
self.verifier = verifier
+ # TODO: Surface this failure reason to the user.
+ # See: https://github.com/pypa/warehouse/issues/9018
+ self.failure_reason = None
- def verify(self, predicate):
- raise InvalidMacaroonError
+ def verify(self, predicate) -> bool:
+ return False
def __call__(self, predicate):
return self.verify(predicate)
class V1Caveat(Caveat):
- def verify_projects(self, projects):
+ def verify_projects(self, projects) -> bool:
# First, ensure that we're actually operating in
# the context of a package.
if not isinstance(self.verifier.context, Project):
- raise InvalidMacaroonError(
+ self.failure_reason = (
"project-scoped token used outside of a project context"
)
+ return False
project = self.verifier.context
if project.normalized_name in projects:
return True
- raise InvalidMacaroonError(
+ self.failure_reason = (
f"project-scoped token is not valid for project '{project.name}'"
)
+ return False
- def verify(self, predicate):
+ def verify(self, predicate) -> bool:
try:
data = json.loads(predicate)
except ValueError:
- raise InvalidMacaroonError("malformatted predicate")
+ self.failure_reason = "malformatted predicate"
+ return False
if data.get("version") != 1:
- raise InvalidMacaroonError("invalidate version in predicate")
+ self.failure_reason = "invalid version in predicate"
+ return False
permissions = data.get("permissions")
if permissions is None:
- raise InvalidMacaroonError("invalid permissions in predicate")
+ self.failure_reason = "invalid permissions in predicate"
+ return False
if permissions == "user":
# User-scoped tokens behave exactly like a user's normal credentials.
@@ -68,11 +77,34 @@
projects = permissions.get("projects")
if projects is None:
- raise InvalidMacaroonError("invalid projects in predicate")
+ self.failure_reason = "invalid projects in predicate"
+ return False
return self.verify_projects(projects)
+class ExpiryCaveat(Caveat):
+ def verify(self, predicate):
+ try:
+ data = json.loads(predicate)
+ expiry = data["exp"]
+ not_before = data["nbf"]
+ except (KeyError, ValueError, TypeError):
+ self.failure_reason = "malformatted predicate"
+ return False
+
+ if not expiry or not not_before:
+ self.failure_reason = "missing fields"
+ return False
+
+ now = int(time.time())
+ if now < not_before or now >= expiry:
+ self.failure_reason = "token is expired"
+ return False
+
+ return True
+
+
class Verifier:
def __init__(self, macaroon, context, principals, permission):
self.macaroon = macaroon
@@ -83,6 +115,7 @@
def verify(self, key):
self.verifier.satisfy_general(V1Caveat(self))
+ self.verifier.satisfy_general(ExpiryCaveat(self))
try:
return self.verifier.verify(self.macaroon, key)
@@ -90,4 +123,4 @@
pymacaroons.exceptions.MacaroonInvalidSignatureException,
Exception, # https://github.com/ecordell/pymacaroons/issues/50
):
- raise InvalidMacaroonError("invalid macaroon signature")
+ return False
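For illustration, a token carrying the new expiry caveat could be minted with pymacaroons as follows (a minimal sketch, not Warehouse's actual token-issuing code; the `nbf`/`exp` predicate format is taken from the `ExpiryCaveat` verifier in the diff above):

```python
import json
import time

import pymacaroons

# Mint a macaroon and attach an expiry caveat valid for the next hour.
m = pymacaroons.Macaroon(location="example.invalid", identifier="key-id", key="server-secret")
now = int(time.time())
m.add_first_party_caveat(json.dumps({"nbf": now, "exp": now + 3600}))
```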
| {"golden_diff": "diff --git a/warehouse/macaroons/caveats.py b/warehouse/macaroons/caveats.py\n--- a/warehouse/macaroons/caveats.py\n+++ b/warehouse/macaroons/caveats.py\n@@ -11,6 +11,7 @@\n # limitations under the License.\n \n import json\n+import time\n \n import pymacaroons\n \n@@ -24,43 +25,51 @@\n class Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n+ # TODO: Surface this failure reason to the user.\n+ # See: https://github.com/pypa/warehouse/issues/9018\n+ self.failure_reason = None\n \n- def verify(self, predicate):\n- raise InvalidMacaroonError\n+ def verify(self, predicate) -> bool:\n+ return False\n \n def __call__(self, predicate):\n return self.verify(predicate)\n \n \n class V1Caveat(Caveat):\n- def verify_projects(self, projects):\n+ def verify_projects(self, projects) -> bool:\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n- raise InvalidMacaroonError(\n+ self.failure_reason = (\n \"project-scoped token used outside of a project context\"\n )\n+ return False\n \n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n \n- raise InvalidMacaroonError(\n+ self.failure_reason = (\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n+ return False\n \n- def verify(self, predicate):\n+ def verify(self, predicate) -> bool:\n try:\n data = json.loads(predicate)\n except ValueError:\n- raise InvalidMacaroonError(\"malformatted predicate\")\n+ self.failure_reason = \"malformatted predicate\"\n+ return False\n \n if data.get(\"version\") != 1:\n- raise InvalidMacaroonError(\"invalidate version in predicate\")\n+ self.failure_reason = \"invalid version in predicate\"\n+ return False\n \n permissions = data.get(\"permissions\")\n if permissions is None:\n- raise InvalidMacaroonError(\"invalid permissions in predicate\")\n+ self.failure_reason = \"invalid permissions in predicate\"\n+ return False\n \n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n@@ -68,11 +77,34 @@\n \n projects = permissions.get(\"projects\")\n if projects is None:\n- raise InvalidMacaroonError(\"invalid projects in predicate\")\n+ self.failure_reason = \"invalid projects in predicate\"\n+ return False\n \n return self.verify_projects(projects)\n \n \n+class ExpiryCaveat(Caveat):\n+ def verify(self, predicate):\n+ try:\n+ data = json.loads(predicate)\n+ expiry = data[\"exp\"]\n+ not_before = data[\"nbf\"]\n+ except (KeyError, ValueError, TypeError):\n+ self.failure_reason = \"malformatted predicate\"\n+ return False\n+\n+ if not expiry or not not_before:\n+ self.failure_reason = \"missing fields\"\n+ return False\n+\n+ now = int(time.time())\n+ if now < not_before or now >= expiry:\n+ self.failure_reason = \"token is expired\"\n+ return False\n+\n+ return True\n+\n+\n class Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n@@ -83,6 +115,7 @@\n \n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n+ self.verifier.satisfy_general(ExpiryCaveat(self))\n \n try:\n return self.verifier.verify(self.macaroon, key)\n@@ -90,4 +123,4 @@\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n- raise InvalidMacaroonError(\"invalid macaroon signature\")\n+ return False\n", "issue": "Add caveats to macaroons for expiration (time) and version\n**What's the problem this 
feature will solve?**\r\n\r\nThis will allow further attenuating the permissions granted by an API key\r\n\r\n**Describe the solution you'd like**\r\n\r\nAddition of two addition types of caveat, project version (for uploads) and time (expiry).\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport pymacaroons\n\nfrom warehouse.packaging.models import Project\n\n\nclass InvalidMacaroonError(Exception):\n ...\n\n\nclass Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n\n def verify(self, predicate):\n raise InvalidMacaroonError\n\n def __call__(self, predicate):\n return self.verify(predicate)\n\n\nclass V1Caveat(Caveat):\n def verify_projects(self, projects):\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n raise InvalidMacaroonError(\n \"project-scoped token used outside of a project context\"\n )\n\n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n\n raise InvalidMacaroonError(\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n\n def verify(self, predicate):\n try:\n data = json.loads(predicate)\n except ValueError:\n raise InvalidMacaroonError(\"malformatted predicate\")\n\n if data.get(\"version\") != 1:\n raise InvalidMacaroonError(\"invalidate version in predicate\")\n\n permissions = data.get(\"permissions\")\n if permissions is None:\n raise InvalidMacaroonError(\"invalid permissions in predicate\")\n\n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n return True\n\n projects = permissions.get(\"projects\")\n if projects is None:\n raise InvalidMacaroonError(\"invalid projects in predicate\")\n\n return self.verify_projects(projects)\n\n\nclass Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n self.context = context\n self.principals = principals\n self.permission = permission\n self.verifier = pymacaroons.Verifier()\n\n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n\n try:\n return self.verifier.verify(self.macaroon, key)\n except (\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n raise InvalidMacaroonError(\"invalid macaroon signature\")\n", "path": "warehouse/macaroons/caveats.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport json\nimport time\n\nimport pymacaroons\n\nfrom warehouse.packaging.models import Project\n\n\nclass InvalidMacaroonError(Exception):\n ...\n\n\nclass Caveat:\n def __init__(self, verifier):\n self.verifier = verifier\n # TODO: Surface this failure reason to the user.\n # See: https://github.com/pypa/warehouse/issues/9018\n self.failure_reason = None\n\n def verify(self, predicate) -> bool:\n return False\n\n def __call__(self, predicate):\n return self.verify(predicate)\n\n\nclass V1Caveat(Caveat):\n def verify_projects(self, projects) -> bool:\n # First, ensure that we're actually operating in\n # the context of a package.\n if not isinstance(self.verifier.context, Project):\n self.failure_reason = (\n \"project-scoped token used outside of a project context\"\n )\n return False\n\n project = self.verifier.context\n if project.normalized_name in projects:\n return True\n\n self.failure_reason = (\n f\"project-scoped token is not valid for project '{project.name}'\"\n )\n return False\n\n def verify(self, predicate) -> bool:\n try:\n data = json.loads(predicate)\n except ValueError:\n self.failure_reason = \"malformatted predicate\"\n return False\n\n if data.get(\"version\") != 1:\n self.failure_reason = \"invalid version in predicate\"\n return False\n\n permissions = data.get(\"permissions\")\n if permissions is None:\n self.failure_reason = \"invalid permissions in predicate\"\n return False\n\n if permissions == \"user\":\n # User-scoped tokens behave exactly like a user's normal credentials.\n return True\n\n projects = permissions.get(\"projects\")\n if projects is None:\n self.failure_reason = \"invalid projects in predicate\"\n return False\n\n return self.verify_projects(projects)\n\n\nclass ExpiryCaveat(Caveat):\n def verify(self, predicate):\n try:\n data = json.loads(predicate)\n expiry = data[\"exp\"]\n not_before = data[\"nbf\"]\n except (KeyError, ValueError, TypeError):\n self.failure_reason = \"malformatted predicate\"\n return False\n\n if not expiry or not not_before:\n self.failure_reason = \"missing fields\"\n return False\n\n now = int(time.time())\n if now < not_before or now >= expiry:\n self.failure_reason = \"token is expired\"\n return False\n\n return True\n\n\nclass Verifier:\n def __init__(self, macaroon, context, principals, permission):\n self.macaroon = macaroon\n self.context = context\n self.principals = principals\n self.permission = permission\n self.verifier = pymacaroons.Verifier()\n\n def verify(self, key):\n self.verifier.satisfy_general(V1Caveat(self))\n self.verifier.satisfy_general(ExpiryCaveat(self))\n\n try:\n return self.verifier.verify(self.macaroon, key)\n except (\n pymacaroons.exceptions.MacaroonInvalidSignatureException,\n Exception, # https://github.com/ecordell/pymacaroons/issues/50\n ):\n return False\n", "path": "warehouse/macaroons/caveats.py"}]} | 1,151 | 922 |
gh_patches_debug_37260 | rasdani/github-patches | git_diff | kubeflow__pipelines-4363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow output artifact store configuration (vs hard coded)
it seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
it would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.
i suggest making it configurable, i can do such a PR if we agree it's needed. 
flexible pipeline service (host) path in client SDK
when creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicates a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
also note that when a user provides the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`); it seems like a potential bug
if it's acceptable i can submit a PR for the line change above
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/aws.py`
Content:
```
1 # Copyright 2019 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):
16 """An operator that configures the container to use AWS credentials.
17
18 AWS doesn't create secret along with kubeflow deployment and it requires users
19 to manually create credential secret with proper permissions.
20
21 ::
22
23 apiVersion: v1
24 kind: Secret
25 metadata:
26 name: aws-secret
27 type: Opaque
28 data:
29 AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID
30 AWS_SECRET_ACCESS_KEY: BASE64_YOUR_AWS_SECRET_ACCESS_KEY
31 """
32
33 def _use_aws_secret(task):
34 from kubernetes import client as k8s_client
35 (
36 task.container
37 .add_env_variable(
38 k8s_client.V1EnvVar(
39 name='AWS_ACCESS_KEY_ID',
40 value_from=k8s_client.V1EnvVarSource(
41 secret_key_ref=k8s_client.V1SecretKeySelector(
42 name=secret_name,
43 key=aws_access_key_id_name
44 )
45 )
46 )
47 )
48 .add_env_variable(
49 k8s_client.V1EnvVar(
50 name='AWS_SECRET_ACCESS_KEY',
51 value_from=k8s_client.V1EnvVarSource(
52 secret_key_ref=k8s_client.V1SecretKeySelector(
53 name=secret_name,
54 key=aws_secret_access_key_name
55 )
56 )
57 )
58 )
59 )
60 return task
61
62 return _use_aws_secret
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/aws.py b/sdk/python/kfp/aws.py
--- a/sdk/python/kfp/aws.py
+++ b/sdk/python/kfp/aws.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):
+def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):
"""An operator that configures the container to use AWS credentials.
AWS doesn't create secret along with kubeflow deployment and it requires users
@@ -32,31 +32,38 @@
def _use_aws_secret(task):
from kubernetes import client as k8s_client
- (
- task.container
- .add_env_variable(
- k8s_client.V1EnvVar(
- name='AWS_ACCESS_KEY_ID',
- value_from=k8s_client.V1EnvVarSource(
- secret_key_ref=k8s_client.V1SecretKeySelector(
- name=secret_name,
- key=aws_access_key_id_name
- )
+ task.container \
+ .add_env_variable(
+ k8s_client.V1EnvVar(
+ name='AWS_ACCESS_KEY_ID',
+ value_from=k8s_client.V1EnvVarSource(
+ secret_key_ref=k8s_client.V1SecretKeySelector(
+ name=secret_name,
+ key=aws_access_key_id_name
)
)
)
+ ) \
+ .add_env_variable(
+ k8s_client.V1EnvVar(
+ name='AWS_SECRET_ACCESS_KEY',
+ value_from=k8s_client.V1EnvVarSource(
+ secret_key_ref=k8s_client.V1SecretKeySelector(
+ name=secret_name,
+ key=aws_secret_access_key_name
+ )
+ )
+ )
+ )
+
+ if aws_region:
+ task.container \
.add_env_variable(
k8s_client.V1EnvVar(
- name='AWS_SECRET_ACCESS_KEY',
- value_from=k8s_client.V1EnvVarSource(
- secret_key_ref=k8s_client.V1SecretKeySelector(
- name=secret_name,
- key=aws_secret_access_key_name
- )
- )
+ name='AWS_REGION',
+ value=aws_region
)
)
- )
return task
return _use_aws_secret
| {"golden_diff": "diff --git a/sdk/python/kfp/aws.py b/sdk/python/kfp/aws.py\n--- a/sdk/python/kfp/aws.py\n+++ b/sdk/python/kfp/aws.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):\n+def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):\n \"\"\"An operator that configures the container to use AWS credentials.\n \n AWS doesn't create secret along with kubeflow deployment and it requires users\n@@ -32,31 +32,38 @@\n \n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n- (\n- task.container\n- .add_env_variable(\n- k8s_client.V1EnvVar(\n- name='AWS_ACCESS_KEY_ID',\n- value_from=k8s_client.V1EnvVarSource(\n- secret_key_ref=k8s_client.V1SecretKeySelector(\n- name=secret_name,\n- key=aws_access_key_id_name\n- )\n+ task.container \\\n+ .add_env_variable(\n+ k8s_client.V1EnvVar(\n+ name='AWS_ACCESS_KEY_ID',\n+ value_from=k8s_client.V1EnvVarSource(\n+ secret_key_ref=k8s_client.V1SecretKeySelector(\n+ name=secret_name,\n+ key=aws_access_key_id_name\n )\n )\n )\n+ ) \\\n+ .add_env_variable(\n+ k8s_client.V1EnvVar(\n+ name='AWS_SECRET_ACCESS_KEY',\n+ value_from=k8s_client.V1EnvVarSource(\n+ secret_key_ref=k8s_client.V1SecretKeySelector(\n+ name=secret_name,\n+ key=aws_secret_access_key_name\n+ )\n+ )\n+ )\n+ )\n+\n+ if aws_region:\n+ task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n- name='AWS_SECRET_ACCESS_KEY',\n- value_from=k8s_client.V1EnvVarSource(\n- secret_key_ref=k8s_client.V1SecretKeySelector(\n- name=secret_name,\n- key=aws_secret_access_key_name\n- )\n- )\n+ name='AWS_REGION',\n+ value=aws_region\n )\n )\n- )\n return task\n \n return _use_aws_secret\n", "issue": "allow output artifact store configuration (vs hard coded)\nit seems like the output artifacts are always stored in a specific minio service, port, namespace, bucket, secrets, etc (`minio-service.kubeflow:9000`). \r\n\r\nsee: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148\r\n\r\nit would be great to make it flexible, e.g. allow using S3, or change namespace or bucket names.\r\ni suggest making it configurable, i can do such PR if we agree its needed. \nflexible pipeline service (host) path in client SDK \nwhen creating an SDK `Client()` the path to `ml-pipeline` API service is loaded from a hard coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicate a specific k8s namespace. it can be valuable to load that default value from an env variable, i.e. 
changing the line in `_client.py` from:\r\n\r\n`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`\r\n\r\nto:\r\n\r\n`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`\r\n\r\nalso note that when a user provide the `host` parameter, the ipython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`), it seems like a potential bug\r\n\r\nif its acceptable i can submit a PR for the line change above\r\n \n", "before_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):\n \"\"\"An operator that configures the container to use AWS credentials.\n\n AWS doesn't create secret along with kubeflow deployment and it requires users\n to manually create credential secret with proper permissions.\n\n ::\n\n apiVersion: v1\n kind: Secret\n metadata:\n name: aws-secret\n type: Opaque\n data:\n AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY: BASE64_YOUR_AWS_SECRET_ACCESS_KEY\n \"\"\"\n\n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n (\n task.container\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_ACCESS_KEY_ID',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_access_key_id_name\n )\n )\n )\n )\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_SECRET_ACCESS_KEY',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_secret_access_key_name\n )\n )\n )\n )\n )\n return task\n\n return _use_aws_secret\n", "path": "sdk/python/kfp/aws.py"}], "after_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\ndef use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY', aws_region=None):\n \"\"\"An operator that configures the container to use AWS credentials.\n\n AWS doesn't create secret along with kubeflow deployment and it requires users\n to manually create credential secret with proper permissions.\n\n ::\n\n apiVersion: v1\n kind: Secret\n metadata:\n name: aws-secret\n type: Opaque\n data:\n AWS_ACCESS_KEY_ID: BASE64_YOUR_AWS_ACCESS_KEY_ID\n AWS_SECRET_ACCESS_KEY: 
BASE64_YOUR_AWS_SECRET_ACCESS_KEY\n \"\"\"\n\n def _use_aws_secret(task):\n from kubernetes import client as k8s_client\n task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_ACCESS_KEY_ID',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_access_key_id_name\n )\n )\n )\n ) \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_SECRET_ACCESS_KEY',\n value_from=k8s_client.V1EnvVarSource(\n secret_key_ref=k8s_client.V1SecretKeySelector(\n name=secret_name,\n key=aws_secret_access_key_name\n )\n )\n )\n )\n\n if aws_region:\n task.container \\\n .add_env_variable(\n k8s_client.V1EnvVar(\n name='AWS_REGION',\n value=aws_region\n )\n )\n return task\n\n return _use_aws_secret\n", "path": "sdk/python/kfp/aws.py"}]} | 1,201 | 586 |
gh_patches_debug_6789 | rasdani/github-patches | git_diff | tornadoweb__tornado-2972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor bug in WSGIContainer docstring.
https://github.com/tornadoweb/tornado/blob/2047e7ae3c825bf52dad10cc8402d09e11091bc1/tornado/wsgi.py#L76
This line (in a docstring showing how to use WSGIContainer) is wrong.
It should say:
return [b"Hello world!\n"]
... because WSGI requires a sequence of bytestrings in modern python. Without this change, the example fails with:
TypeError: sequence item 0: expected a bytes-like object, str found
in python 3.9 (and probably any python 3).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tornado/wsgi.py`
Content:
```
1 #
2 # Copyright 2009 Facebook
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
16 """WSGI support for the Tornado web framework.
17
18 WSGI is the Python standard for web servers, and allows for interoperability
19 between Tornado and other Python web frameworks and servers.
20
21 This module provides WSGI support via the `WSGIContainer` class, which
22 makes it possible to run applications using other WSGI frameworks on
23 the Tornado HTTP server. The reverse is not supported; the Tornado
24 `.Application` and `.RequestHandler` classes are designed for use with
25 the Tornado `.HTTPServer` and cannot be used in a generic WSGI
26 container.
27
28 """
29
30 import sys
31 from io import BytesIO
32 import tornado
33
34 from tornado import escape
35 from tornado import httputil
36 from tornado.log import access_log
37
38 from typing import List, Tuple, Optional, Callable, Any, Dict, Text
39 from types import TracebackType
40 import typing
41
42 if typing.TYPE_CHECKING:
43 from typing import Type # noqa: F401
44 from wsgiref.types import WSGIApplication as WSGIAppType # noqa: F401
45
46
47 # PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
48 # that are smuggled inside objects of type unicode (via the latin1 encoding).
49 # This function is like those in the tornado.escape module, but defined
50 # here to minimize the temptation to use it in non-wsgi contexts.
51 def to_wsgi_str(s: bytes) -> str:
52 assert isinstance(s, bytes)
53 return s.decode("latin1")
54
55
56 class WSGIContainer(object):
57 r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
58
59 .. warning::
60
61 WSGI is a *synchronous* interface, while Tornado's concurrency model
62 is based on single-threaded asynchronous execution. This means that
63 running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
64 than running the same app in a multi-threaded WSGI server like
65 ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are
66 benefits to combining Tornado and WSGI in the same process that
67 outweigh the reduced scalability.
68
69 Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
70 run it. For example::
71
72 def simple_app(environ, start_response):
73 status = "200 OK"
74 response_headers = [("Content-type", "text/plain")]
75 start_response(status, response_headers)
76 return ["Hello world!\n"]
77
78 container = tornado.wsgi.WSGIContainer(simple_app)
79 http_server = tornado.httpserver.HTTPServer(container)
80 http_server.listen(8888)
81 tornado.ioloop.IOLoop.current().start()
82
83 This class is intended to let other frameworks (Django, web.py, etc)
84 run on the Tornado HTTP server and I/O loop.
85
86 The `tornado.web.FallbackHandler` class is often useful for mixing
87 Tornado and WSGI apps in the same server. See
88 https://github.com/bdarnell/django-tornado-demo for a complete example.
89 """
90
91 def __init__(self, wsgi_application: "WSGIAppType") -> None:
92 self.wsgi_application = wsgi_application
93
94 def __call__(self, request: httputil.HTTPServerRequest) -> None:
95 data = {} # type: Dict[str, Any]
96 response = [] # type: List[bytes]
97
98 def start_response(
99 status: str,
100 headers: List[Tuple[str, str]],
101 exc_info: Optional[
102 Tuple[
103 "Optional[Type[BaseException]]",
104 Optional[BaseException],
105 Optional[TracebackType],
106 ]
107 ] = None,
108 ) -> Callable[[bytes], Any]:
109 data["status"] = status
110 data["headers"] = headers
111 return response.append
112
113 app_response = self.wsgi_application(
114 WSGIContainer.environ(request), start_response
115 )
116 try:
117 response.extend(app_response)
118 body = b"".join(response)
119 finally:
120 if hasattr(app_response, "close"):
121 app_response.close() # type: ignore
122 if not data:
123 raise Exception("WSGI app did not call start_response")
124
125 status_code_str, reason = data["status"].split(" ", 1)
126 status_code = int(status_code_str)
127 headers = data["headers"] # type: List[Tuple[str, str]]
128 header_set = set(k.lower() for (k, v) in headers)
129 body = escape.utf8(body)
130 if status_code != 304:
131 if "content-length" not in header_set:
132 headers.append(("Content-Length", str(len(body))))
133 if "content-type" not in header_set:
134 headers.append(("Content-Type", "text/html; charset=UTF-8"))
135 if "server" not in header_set:
136 headers.append(("Server", "TornadoServer/%s" % tornado.version))
137
138 start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
139 header_obj = httputil.HTTPHeaders()
140 for key, value in headers:
141 header_obj.add(key, value)
142 assert request.connection is not None
143 request.connection.write_headers(start_line, header_obj, chunk=body)
144 request.connection.finish()
145 self._log(status_code, request)
146
147 @staticmethod
148 def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:
149 """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment."""
150 hostport = request.host.split(":")
151 if len(hostport) == 2:
152 host = hostport[0]
153 port = int(hostport[1])
154 else:
155 host = request.host
156 port = 443 if request.protocol == "https" else 80
157 environ = {
158 "REQUEST_METHOD": request.method,
159 "SCRIPT_NAME": "",
160 "PATH_INFO": to_wsgi_str(
161 escape.url_unescape(request.path, encoding=None, plus=False)
162 ),
163 "QUERY_STRING": request.query,
164 "REMOTE_ADDR": request.remote_ip,
165 "SERVER_NAME": host,
166 "SERVER_PORT": str(port),
167 "SERVER_PROTOCOL": request.version,
168 "wsgi.version": (1, 0),
169 "wsgi.url_scheme": request.protocol,
170 "wsgi.input": BytesIO(escape.utf8(request.body)),
171 "wsgi.errors": sys.stderr,
172 "wsgi.multithread": False,
173 "wsgi.multiprocess": True,
174 "wsgi.run_once": False,
175 }
176 if "Content-Type" in request.headers:
177 environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
178 if "Content-Length" in request.headers:
179 environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
180 for key, value in request.headers.items():
181 environ["HTTP_" + key.replace("-", "_").upper()] = value
182 return environ
183
184 def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:
185 if status_code < 400:
186 log_method = access_log.info
187 elif status_code < 500:
188 log_method = access_log.warning
189 else:
190 log_method = access_log.error
191 request_time = 1000.0 * request.request_time()
192 assert request.method is not None
193 assert request.uri is not None
194 summary = request.method + " " + request.uri + " (" + request.remote_ip + ")"
195 log_method("%d %s %.2fms", status_code, summary, request_time)
196
197
198 HTTPRequest = httputil.HTTPServerRequest
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tornado/wsgi.py b/tornado/wsgi.py
--- a/tornado/wsgi.py
+++ b/tornado/wsgi.py
@@ -73,7 +73,7 @@
status = "200 OK"
response_headers = [("Content-type", "text/plain")]
start_response(status, response_headers)
- return ["Hello world!\n"]
+ return [b"Hello world!\n"]
container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
| {"golden_diff": "diff --git a/tornado/wsgi.py b/tornado/wsgi.py\n--- a/tornado/wsgi.py\n+++ b/tornado/wsgi.py\n@@ -73,7 +73,7 @@\n status = \"200 OK\"\n response_headers = [(\"Content-type\", \"text/plain\")]\n start_response(status, response_headers)\n- return [\"Hello world!\\n\"]\n+ return [b\"Hello world!\\n\"]\n \n container = tornado.wsgi.WSGIContainer(simple_app)\n http_server = tornado.httpserver.HTTPServer(container)\n", "issue": "Minor bug in WSGIContainer docstring.\nhttps://github.com/tornadoweb/tornado/blob/2047e7ae3c825bf52dad10cc8402d09e11091bc1/tornado/wsgi.py#L76\r\n\r\nThis line (in a docstring showing how to use WSGIContainer) is wrong.\r\n\r\nIt should say:\r\n\r\n return [b\"Hello world!\\n\"] \r\n\r\n... because WSGI requires a sequence of bytestrings in modern python. Without this change, the example fails with:\r\n\r\n TypeError: sequence item 0: expected a bytes-like object, str found\r\n\r\nin python 3.9 (and probably any python 3).\n", "before_files": [{"content": "#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"WSGI support for the Tornado web framework.\n\nWSGI is the Python standard for web servers, and allows for interoperability\nbetween Tornado and other Python web frameworks and servers.\n\nThis module provides WSGI support via the `WSGIContainer` class, which\nmakes it possible to run applications using other WSGI frameworks on\nthe Tornado HTTP server. The reverse is not supported; the Tornado\n`.Application` and `.RequestHandler` classes are designed for use with\nthe Tornado `.HTTPServer` and cannot be used in a generic WSGI\ncontainer.\n\n\"\"\"\n\nimport sys\nfrom io import BytesIO\nimport tornado\n\nfrom tornado import escape\nfrom tornado import httputil\nfrom tornado.log import access_log\n\nfrom typing import List, Tuple, Optional, Callable, Any, Dict, Text\nfrom types import TracebackType\nimport typing\n\nif typing.TYPE_CHECKING:\n from typing import Type # noqa: F401\n from wsgiref.types import WSGIApplication as WSGIAppType # noqa: F401\n\n\n# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings\n# that are smuggled inside objects of type unicode (via the latin1 encoding).\n# This function is like those in the tornado.escape module, but defined\n# here to minimize the temptation to use it in non-wsgi contexts.\ndef to_wsgi_str(s: bytes) -> str:\n assert isinstance(s, bytes)\n return s.decode(\"latin1\")\n\n\nclass WSGIContainer(object):\n r\"\"\"Makes a WSGI-compatible function runnable on Tornado's HTTP server.\n\n .. warning::\n\n WSGI is a *synchronous* interface, while Tornado's concurrency model\n is based on single-threaded asynchronous execution. This means that\n running a WSGI app with Tornado's `WSGIContainer` is *less scalable*\n than running the same app in a multi-threaded WSGI server like\n ``gunicorn`` or ``uwsgi``. 
Use `WSGIContainer` only when there are\n benefits to combining Tornado and WSGI in the same process that\n outweigh the reduced scalability.\n\n Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to\n run it. For example::\n\n def simple_app(environ, start_response):\n status = \"200 OK\"\n response_headers = [(\"Content-type\", \"text/plain\")]\n start_response(status, response_headers)\n return [\"Hello world!\\n\"]\n\n container = tornado.wsgi.WSGIContainer(simple_app)\n http_server = tornado.httpserver.HTTPServer(container)\n http_server.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\n This class is intended to let other frameworks (Django, web.py, etc)\n run on the Tornado HTTP server and I/O loop.\n\n The `tornado.web.FallbackHandler` class is often useful for mixing\n Tornado and WSGI apps in the same server. See\n https://github.com/bdarnell/django-tornado-demo for a complete example.\n \"\"\"\n\n def __init__(self, wsgi_application: \"WSGIAppType\") -> None:\n self.wsgi_application = wsgi_application\n\n def __call__(self, request: httputil.HTTPServerRequest) -> None:\n data = {} # type: Dict[str, Any]\n response = [] # type: List[bytes]\n\n def start_response(\n status: str,\n headers: List[Tuple[str, str]],\n exc_info: Optional[\n Tuple[\n \"Optional[Type[BaseException]]\",\n Optional[BaseException],\n Optional[TracebackType],\n ]\n ] = None,\n ) -> Callable[[bytes], Any]:\n data[\"status\"] = status\n data[\"headers\"] = headers\n return response.append\n\n app_response = self.wsgi_application(\n WSGIContainer.environ(request), start_response\n )\n try:\n response.extend(app_response)\n body = b\"\".join(response)\n finally:\n if hasattr(app_response, \"close\"):\n app_response.close() # type: ignore\n if not data:\n raise Exception(\"WSGI app did not call start_response\")\n\n status_code_str, reason = data[\"status\"].split(\" \", 1)\n status_code = int(status_code_str)\n headers = data[\"headers\"] # type: List[Tuple[str, str]]\n header_set = set(k.lower() for (k, v) in headers)\n body = escape.utf8(body)\n if status_code != 304:\n if \"content-length\" not in header_set:\n headers.append((\"Content-Length\", str(len(body))))\n if \"content-type\" not in header_set:\n headers.append((\"Content-Type\", \"text/html; charset=UTF-8\"))\n if \"server\" not in header_set:\n headers.append((\"Server\", \"TornadoServer/%s\" % tornado.version))\n\n start_line = httputil.ResponseStartLine(\"HTTP/1.1\", status_code, reason)\n header_obj = httputil.HTTPHeaders()\n for key, value in headers:\n header_obj.add(key, value)\n assert request.connection is not None\n request.connection.write_headers(start_line, header_obj, chunk=body)\n request.connection.finish()\n self._log(status_code, request)\n\n @staticmethod\n def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:\n \"\"\"Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.\"\"\"\n hostport = request.host.split(\":\")\n if len(hostport) == 2:\n host = hostport[0]\n port = int(hostport[1])\n else:\n host = request.host\n port = 443 if request.protocol == \"https\" else 80\n environ = {\n \"REQUEST_METHOD\": request.method,\n \"SCRIPT_NAME\": \"\",\n \"PATH_INFO\": to_wsgi_str(\n escape.url_unescape(request.path, encoding=None, plus=False)\n ),\n \"QUERY_STRING\": request.query,\n \"REMOTE_ADDR\": request.remote_ip,\n \"SERVER_NAME\": host,\n \"SERVER_PORT\": str(port),\n \"SERVER_PROTOCOL\": request.version,\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": 
request.protocol,\n \"wsgi.input\": BytesIO(escape.utf8(request.body)),\n \"wsgi.errors\": sys.stderr,\n \"wsgi.multithread\": False,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n if \"Content-Type\" in request.headers:\n environ[\"CONTENT_TYPE\"] = request.headers.pop(\"Content-Type\")\n if \"Content-Length\" in request.headers:\n environ[\"CONTENT_LENGTH\"] = request.headers.pop(\"Content-Length\")\n for key, value in request.headers.items():\n environ[\"HTTP_\" + key.replace(\"-\", \"_\").upper()] = value\n return environ\n\n def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:\n if status_code < 400:\n log_method = access_log.info\n elif status_code < 500:\n log_method = access_log.warning\n else:\n log_method = access_log.error\n request_time = 1000.0 * request.request_time()\n assert request.method is not None\n assert request.uri is not None\n summary = request.method + \" \" + request.uri + \" (\" + request.remote_ip + \")\"\n log_method(\"%d %s %.2fms\", status_code, summary, request_time)\n\n\nHTTPRequest = httputil.HTTPServerRequest\n", "path": "tornado/wsgi.py"}], "after_files": [{"content": "#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"WSGI support for the Tornado web framework.\n\nWSGI is the Python standard for web servers, and allows for interoperability\nbetween Tornado and other Python web frameworks and servers.\n\nThis module provides WSGI support via the `WSGIContainer` class, which\nmakes it possible to run applications using other WSGI frameworks on\nthe Tornado HTTP server. The reverse is not supported; the Tornado\n`.Application` and `.RequestHandler` classes are designed for use with\nthe Tornado `.HTTPServer` and cannot be used in a generic WSGI\ncontainer.\n\n\"\"\"\n\nimport sys\nfrom io import BytesIO\nimport tornado\n\nfrom tornado import escape\nfrom tornado import httputil\nfrom tornado.log import access_log\n\nfrom typing import List, Tuple, Optional, Callable, Any, Dict, Text\nfrom types import TracebackType\nimport typing\n\nif typing.TYPE_CHECKING:\n from typing import Type # noqa: F401\n from wsgiref.types import WSGIApplication as WSGIAppType # noqa: F401\n\n\n# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings\n# that are smuggled inside objects of type unicode (via the latin1 encoding).\n# This function is like those in the tornado.escape module, but defined\n# here to minimize the temptation to use it in non-wsgi contexts.\ndef to_wsgi_str(s: bytes) -> str:\n assert isinstance(s, bytes)\n return s.decode(\"latin1\")\n\n\nclass WSGIContainer(object):\n r\"\"\"Makes a WSGI-compatible function runnable on Tornado's HTTP server.\n\n .. warning::\n\n WSGI is a *synchronous* interface, while Tornado's concurrency model\n is based on single-threaded asynchronous execution. 
This means that\n running a WSGI app with Tornado's `WSGIContainer` is *less scalable*\n than running the same app in a multi-threaded WSGI server like\n ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are\n benefits to combining Tornado and WSGI in the same process that\n outweigh the reduced scalability.\n\n Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to\n run it. For example::\n\n def simple_app(environ, start_response):\n status = \"200 OK\"\n response_headers = [(\"Content-type\", \"text/plain\")]\n start_response(status, response_headers)\n return [b\"Hello world!\\n\"]\n\n container = tornado.wsgi.WSGIContainer(simple_app)\n http_server = tornado.httpserver.HTTPServer(container)\n http_server.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\n This class is intended to let other frameworks (Django, web.py, etc)\n run on the Tornado HTTP server and I/O loop.\n\n The `tornado.web.FallbackHandler` class is often useful for mixing\n Tornado and WSGI apps in the same server. See\n https://github.com/bdarnell/django-tornado-demo for a complete example.\n \"\"\"\n\n def __init__(self, wsgi_application: \"WSGIAppType\") -> None:\n self.wsgi_application = wsgi_application\n\n def __call__(self, request: httputil.HTTPServerRequest) -> None:\n data = {} # type: Dict[str, Any]\n response = [] # type: List[bytes]\n\n def start_response(\n status: str,\n headers: List[Tuple[str, str]],\n exc_info: Optional[\n Tuple[\n \"Optional[Type[BaseException]]\",\n Optional[BaseException],\n Optional[TracebackType],\n ]\n ] = None,\n ) -> Callable[[bytes], Any]:\n data[\"status\"] = status\n data[\"headers\"] = headers\n return response.append\n\n app_response = self.wsgi_application(\n WSGIContainer.environ(request), start_response\n )\n try:\n response.extend(app_response)\n body = b\"\".join(response)\n finally:\n if hasattr(app_response, \"close\"):\n app_response.close() # type: ignore\n if not data:\n raise Exception(\"WSGI app did not call start_response\")\n\n status_code_str, reason = data[\"status\"].split(\" \", 1)\n status_code = int(status_code_str)\n headers = data[\"headers\"] # type: List[Tuple[str, str]]\n header_set = set(k.lower() for (k, v) in headers)\n body = escape.utf8(body)\n if status_code != 304:\n if \"content-length\" not in header_set:\n headers.append((\"Content-Length\", str(len(body))))\n if \"content-type\" not in header_set:\n headers.append((\"Content-Type\", \"text/html; charset=UTF-8\"))\n if \"server\" not in header_set:\n headers.append((\"Server\", \"TornadoServer/%s\" % tornado.version))\n\n start_line = httputil.ResponseStartLine(\"HTTP/1.1\", status_code, reason)\n header_obj = httputil.HTTPHeaders()\n for key, value in headers:\n header_obj.add(key, value)\n assert request.connection is not None\n request.connection.write_headers(start_line, header_obj, chunk=body)\n request.connection.finish()\n self._log(status_code, request)\n\n @staticmethod\n def environ(request: httputil.HTTPServerRequest) -> Dict[Text, Any]:\n \"\"\"Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.\"\"\"\n hostport = request.host.split(\":\")\n if len(hostport) == 2:\n host = hostport[0]\n port = int(hostport[1])\n else:\n host = request.host\n port = 443 if request.protocol == \"https\" else 80\n environ = {\n \"REQUEST_METHOD\": request.method,\n \"SCRIPT_NAME\": \"\",\n \"PATH_INFO\": to_wsgi_str(\n escape.url_unescape(request.path, encoding=None, plus=False)\n ),\n \"QUERY_STRING\": request.query,\n \"REMOTE_ADDR\": 
request.remote_ip,\n \"SERVER_NAME\": host,\n \"SERVER_PORT\": str(port),\n \"SERVER_PROTOCOL\": request.version,\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": request.protocol,\n \"wsgi.input\": BytesIO(escape.utf8(request.body)),\n \"wsgi.errors\": sys.stderr,\n \"wsgi.multithread\": False,\n \"wsgi.multiprocess\": True,\n \"wsgi.run_once\": False,\n }\n if \"Content-Type\" in request.headers:\n environ[\"CONTENT_TYPE\"] = request.headers.pop(\"Content-Type\")\n if \"Content-Length\" in request.headers:\n environ[\"CONTENT_LENGTH\"] = request.headers.pop(\"Content-Length\")\n for key, value in request.headers.items():\n environ[\"HTTP_\" + key.replace(\"-\", \"_\").upper()] = value\n return environ\n\n def _log(self, status_code: int, request: httputil.HTTPServerRequest) -> None:\n if status_code < 400:\n log_method = access_log.info\n elif status_code < 500:\n log_method = access_log.warning\n else:\n log_method = access_log.error\n request_time = 1000.0 * request.request_time()\n assert request.method is not None\n assert request.uri is not None\n summary = request.method + \" \" + request.uri + \" (\" + request.remote_ip + \")\"\n log_method(\"%d %s %.2fms\", status_code, summary, request_time)\n\n\nHTTPRequest = httputil.HTTPServerRequest\n", "path": "tornado/wsgi.py"}]} | 2,734 | 119 |
gh_patches_debug_6695 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1938 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Issue with static calendar "last Monday of the month" schedule
### I Have A Problem With:
Sensor configuration
### What's Your Problem
Everything used to work fine until a few weeks ago. I have 3 static calendars:
- mixed waste is picked up every Monday
- plastic is picked up first Monday of the month
- paper is picked up last Monday of the month
I have 3 sources and 3 sensors configured, and as I've mentioned, everything used to work fine.
However, recently, the paper schedule became the same as the plastic schedule, it's showing first Mondays of the month (and not last). So it looks like plastic and paper are picked up on the same day.
e.g. currently (2024-04-03) the next paper pickup should show 2024-04-29 and not 2024-05-06.
Does anyone have an idea what might have changed to cause this, or a suggestion for a fix?




### Source (if relevant)
static
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: static
calendar_title: "Garbage Pickup"
args:
type: Garbage
frequency: WEEKLY
weekdays: MO
- name: static
calendar_title: "Plastic Pickup"
args:
type: Plastic
frequency: MONTHLY
weekdays: { MO: 1 }
- name: static
calendar_title: "Paper Pickup"
args:
type: Paper
frequency: MONTHLY
weekdays: { MO: -1 }
sensor:
- platform: waste_collection_schedule
name: "Next Garbage Pickup"
source_index: 0
details_format: upcoming
add_days_to: true
value_template: "{{ value.daysTo }} days"
types:
- Garbage
- platform: waste_collection_schedule
name: "Next Plastic Pickup"
source_index: 1
details_format: upcoming
add_days_to: true
value_template: "{{ value.daysTo }} days"
types:
- Plastic
- platform: waste_collection_schedule
name: "Next Paper Pickup"
source_index: 2
details_format: upcoming
add_days_to: true
value_template: "{{ value.daysTo }} days"
types:
- Paper
```
### Checklist Source Error
- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [ ] Checked that the website of your service provider is still working
- [ ] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py`
Content:
```
1 import datetime
2 from collections import OrderedDict
3
4 from dateutil import parser
5 from dateutil.rrule import FR, MO, SA, SU, TH, TU, WE, rrule, weekday
6 from waste_collection_schedule import Collection # type: ignore[attr-defined]
7
8 TITLE = "Static Source"
9 DESCRIPTION = "Source for static waste collection schedules."
10 URL = None
11 TEST_CASES = {
12 "Dates only": {"type": "Dates only", "dates": ["2022-01-01", "2022-02-28"]},
13 "Same date twice": {"type": "Dates only", "dates": ["2022-01-01", "2022-01-01"]},
14 "Recurrence monthly by date": {
15 "type": "First day of month",
16 "frequency": "MONTHLY",
17 "interval": 1,
18 "start": "2022-01-01",
19 "until": "2022-12-31",
20 },
21 "Recurrence monthly by date with date list": {
22 "type": "First day of month excluding 01-Jan, including 02-Jan",
23 "frequency": "MONTHLY",
24 "interval": 1,
25 "start": "2022-01-01",
26 "until": "2022-12-31",
27 "excludes": ["2022-01-01"],
28 "dates": ["2022-01-02"],
29 },
30 "Recurrence with weekday dict (day + byweekday)": {
31 "type": "First Monday and second Tuesday of the month",
32 "frequency": "MONTHLY",
33 "start": "2022-01-01",
34 "weekdays": {"MO": 1, "TU": 2},
35 },
36 "Recurrence with first Saturday of the month": {
37 "type": "First Saturday of the month",
38 "frequency": "MONTHLY",
39 "start": "2022-01-01",
40 "weekdays": "SA",
41 },
42 "Recurrence with last Saturday of the month": {
43 "type": "Last Saturday of the month",
44 "frequency": "MONTHLY",
45 "start": "2022-01-01",
46 "weekdays": {"SA": -1},
47 },
48 "Recurrence weekly specified by weekday": {
49 "type": "Every Friday",
50 "frequency": "WEEKLY",
51 "weekdays": "FR",
52 },
53 }
54
55 FREQNAMES = ["YEARLY", "MONTHLY", "WEEKLY", "DAILY"]
56 WEEKDAY_MAP = {"MO": MO, "TU": TU, "WE": WE, "TH": TH, "FR": FR, "SA": SA, "SU": SU}
57
58
59 class Source:
60 def __init__(
61 self,
62 type: str,
63 dates: list[str] | None = None,
64 frequency: str | None = None,
65 interval: int = 1,
66 start: str | None = None,
67 until: str | None = None,
68 count: int | None = None,
69 excludes: list[str] | None = None,
70 weekdays: list[str | int]
71 | dict[str | int, int | str | None]
72 | str
73 | None = None,
74 ):
75 self._weekdays: list[weekday] | None = None
76 if weekdays is not None:
77 self._weekdays = []
78 if isinstance(weekdays, dict | OrderedDict):
79 [
80 self.add_weekday(
81 weekday,
82 int(number) if number is not None and number == "" else 1,
83 )
84 for weekday, number in weekdays.items()
85 ]
86
87 elif isinstance(weekdays, str):
88 self.add_weekday(weekdays, 1)
89
90 else:
91 raise Exception(f"Invalid weekdays format: {weekdays}")
92
93 if self._weekdays == []:
94 self._weekdays = None
95
96 self._type = type
97 self._dates = [parser.isoparse(d).date() for d in dates or []]
98
99 self._recurrence = FREQNAMES.index(frequency) if frequency is not None else None
100 self._interval = interval
101 self._start = parser.isoparse(start).date() if start else None
102 if until:
103 self._until: datetime.date | None = parser.isoparse(until).date()
104 self._count = None
105 else:
106 self._until = None
107 self._count = count if count else 10
108 self._excludes = [parser.isoparse(d).date() for d in excludes or []]
109
110 def add_weekday(self, weekday, count: int):
111 if self._weekdays is None:
112 raise ValueError("Internal Error: weekdays not initialized")
113
114 if weekday not in WEEKDAY_MAP:
115 raise Exception(f"invalid weekday: {weekday}")
116
117 self._weekdays.append(WEEKDAY_MAP[weekday](count))
118
119 def fetch(self):
120 dates = []
121
122 if self._recurrence is not None:
123 ruledates = rrule(
124 freq=self._recurrence,
125 interval=self._interval,
126 dtstart=self._start,
127 until=self._until,
128 count=self._count,
129 byweekday=self._weekdays,
130 )
131
132 for ruleentry in ruledates:
133 date = ruleentry.date()
134
135 if self._excludes is not None and date in self._excludes:
136 continue
137
138 dates.append(date)
139
140 if self._dates is not None:
141 dates.extend(self._dates)
142
143 dates.sort()
144
145 entries = [Collection(date, self._type) for date in set(dates)]
146 return entries
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
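
(Editorial note: the golden diff below flips `==` to `!=` on line 82 of `static.py`. A small, hypothetical reproduction of the effect of that one-character change; the helper function names are illustrative, not part of the repository.)

```python
# Illustration only: the ternary from static.py line 82, before and after the patch.
from dateutil.rrule import MO

def weekday_count_buggy(number):
    # original condition: a real number such as -1 fails `number == ""`,
    # so the expression falls back to 1
    return int(number) if number is not None and number == "" else 1

def weekday_count_fixed(number):
    # patched condition: a real number passes `number != ""` and is preserved
    return int(number) if number is not None and number != "" else 1

print(MO(weekday_count_buggy(-1)))  # MO(+1): "last Monday" silently becomes "first Monday"
print(MO(weekday_count_fixed(-1)))  # MO(-1): the intended "last Monday"
```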
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py
@@ -79,7 +79,7 @@
[
self.add_weekday(
weekday,
- int(number) if number is not None and number == "" else 1,
+ int(number) if number is not None and number != "" else 1,
)
for weekday, number in weekdays.items()
]
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py\n@@ -79,7 +79,7 @@\n [\n self.add_weekday(\n weekday,\n- int(number) if number is not None and number == \"\" else 1,\n+ int(number) if number is not None and number != \"\" else 1,\n )\n for weekday, number in weekdays.items()\n ]\n", "issue": "[Bug]: Issue with static calendar \"last Monday of the month\" schedule\n### I Have A Problem With:\n\nSensor configuration\n\n### What's Your Problem\n\nEverything used to work fine until a few weeks ago. I have 3 static calendars:\r\n- mixed waste is picked up every Monday\r\n- plastic is picked up first Monday of the month\r\n- paper is picked up last Monday of the month\r\n\r\nI have 3 sources and 3 sensors configured, and as I've mentioned, everything used to work fine.\r\n\r\nHowever, recently, the paper schedule became the same as the plastic schedule, it's showing first Mondays of the month (and not last). So it looks like plastic and paper are picked up on the same day.\r\n\r\ne.g. currently (2023-04-03) the next paper pickup should show 2024-04-29 and not 2024-05-06.\r\n\r\nDoes anyone have an idea what might have changed to cause this, or a suggestion for a fix?\r\n\r\n\r\n\r\n\r\n\r\n\n\n### Source (if relevant)\n\nstatic\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: static\r\n calendar_title: \"Garbage Pickup\"\r\n args:\r\n type: Garbage\r\n frequency: WEEKLY\r\n weekdays: MO\r\n - name: static\r\n calendar_title: \"Plastic Pickup\"\r\n args:\r\n type: Plastic\r\n frequency: MONTHLY\r\n weekdays: { MO: 1 }\r\n - name: static\r\n calendar_title: \"Paper Pickup\"\r\n args:\r\n type: Paper\r\n frequency: MONTHLY\r\n weekdays: { MO: -1 }\r\n\r\nsensor:\r\n - platform: waste_collection_schedule\r\n name: \"Next Garbage Pickup\"\r\n source_index: 0\r\n details_format: upcoming\r\n add_days_to: true\r\n value_template: \"{{ value.daysTo }} days\"\r\n types:\r\n - Garbage\r\n - platform: waste_collection_schedule\r\n name: \"Next Plastic Pickup\"\r\n source_index: 1\r\n details_format: upcoming\r\n add_days_to: true\r\n value_template: \"{{ value.daysTo }} days\"\r\n types:\r\n - Plastic\r\n - platform: waste_collection_schedule\r\n name: \"Next Paper Pickup\"\r\n source_index: 2\r\n details_format: upcoming\r\n add_days_to: true\r\n value_template: \"{{ value.daysTo }} days\"\r\n types:\r\n - Paper\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [ ] Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [ ] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has 
already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import datetime\nfrom collections import OrderedDict\n\nfrom dateutil import parser\nfrom dateutil.rrule import FR, MO, SA, SU, TH, TU, WE, rrule, weekday\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Static Source\"\nDESCRIPTION = \"Source for static waste collection schedules.\"\nURL = None\nTEST_CASES = {\n \"Dates only\": {\"type\": \"Dates only\", \"dates\": [\"2022-01-01\", \"2022-02-28\"]},\n \"Same date twice\": {\"type\": \"Dates only\", \"dates\": [\"2022-01-01\", \"2022-01-01\"]},\n \"Recurrence monthly by date\": {\n \"type\": \"First day of month\",\n \"frequency\": \"MONTHLY\",\n \"interval\": 1,\n \"start\": \"2022-01-01\",\n \"until\": \"2022-12-31\",\n },\n \"Recurrence monthly by date with date list\": {\n \"type\": \"First day of month excluding 01-Jan, including 02-Jan\",\n \"frequency\": \"MONTHLY\",\n \"interval\": 1,\n \"start\": \"2022-01-01\",\n \"until\": \"2022-12-31\",\n \"excludes\": [\"2022-01-01\"],\n \"dates\": [\"2022-01-02\"],\n },\n \"Recurrence with weekday dict (day + byweekday)\": {\n \"type\": \"First Monday and second Tuesday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": {\"MO\": 1, \"TU\": 2},\n },\n \"Recurrence with first Saturday of the month\": {\n \"type\": \"First Saturday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": \"SA\",\n },\n \"Recurrence with last Saturday of the month\": {\n \"type\": \"Last Saturday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": {\"SA\": -1},\n },\n \"Recurrence weekly specified by weekday\": {\n \"type\": \"Every Friday\",\n \"frequency\": \"WEEKLY\",\n \"weekdays\": \"FR\",\n },\n}\n\nFREQNAMES = [\"YEARLY\", \"MONTHLY\", \"WEEKLY\", \"DAILY\"]\nWEEKDAY_MAP = {\"MO\": MO, \"TU\": TU, \"WE\": WE, \"TH\": TH, \"FR\": FR, \"SA\": SA, \"SU\": SU}\n\n\nclass Source:\n def __init__(\n self,\n type: str,\n dates: list[str] | None = None,\n frequency: str | None = None,\n interval: int = 1,\n start: str | None = None,\n until: str | None = None,\n count: int | None = None,\n excludes: list[str] | None = None,\n weekdays: list[str | int]\n | dict[str | int, int | str | None]\n | str\n | None = None,\n ):\n self._weekdays: list[weekday] | None = None\n if weekdays is not None:\n self._weekdays = []\n if isinstance(weekdays, dict | OrderedDict):\n [\n self.add_weekday(\n weekday,\n int(number) if number is not None and number == \"\" else 1,\n )\n for weekday, number in weekdays.items()\n ]\n\n elif isinstance(weekdays, str):\n self.add_weekday(weekdays, 1)\n\n else:\n raise Exception(f\"Invalid weekdays format: {weekdays}\")\n\n if self._weekdays == []:\n self._weekdays = None\n\n self._type = type\n self._dates = [parser.isoparse(d).date() for d in dates or []]\n\n self._recurrence = FREQNAMES.index(frequency) if frequency is not None else None\n self._interval = interval\n self._start = parser.isoparse(start).date() if start else None\n if until:\n self._until: datetime.date | None = parser.isoparse(until).date()\n self._count = None\n else:\n self._until = None\n self._count = count if count else 10\n self._excludes = [parser.isoparse(d).date() for d in excludes or []]\n\n def add_weekday(self, weekday, count: 
int):\n if self._weekdays is None:\n raise ValueError(\"Internal Error: weekdays not initialized\")\n\n if weekday not in WEEKDAY_MAP:\n raise Exception(f\"invalid weekday: {weekday}\")\n\n self._weekdays.append(WEEKDAY_MAP[weekday](count))\n\n def fetch(self):\n dates = []\n\n if self._recurrence is not None:\n ruledates = rrule(\n freq=self._recurrence,\n interval=self._interval,\n dtstart=self._start,\n until=self._until,\n count=self._count,\n byweekday=self._weekdays,\n )\n\n for ruleentry in ruledates:\n date = ruleentry.date()\n\n if self._excludes is not None and date in self._excludes:\n continue\n\n dates.append(date)\n\n if self._dates is not None:\n dates.extend(self._dates)\n\n dates.sort()\n\n entries = [Collection(date, self._type) for date in set(dates)]\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py"}], "after_files": [{"content": "import datetime\nfrom collections import OrderedDict\n\nfrom dateutil import parser\nfrom dateutil.rrule import FR, MO, SA, SU, TH, TU, WE, rrule, weekday\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Static Source\"\nDESCRIPTION = \"Source for static waste collection schedules.\"\nURL = None\nTEST_CASES = {\n \"Dates only\": {\"type\": \"Dates only\", \"dates\": [\"2022-01-01\", \"2022-02-28\"]},\n \"Same date twice\": {\"type\": \"Dates only\", \"dates\": [\"2022-01-01\", \"2022-01-01\"]},\n \"Recurrence monthly by date\": {\n \"type\": \"First day of month\",\n \"frequency\": \"MONTHLY\",\n \"interval\": 1,\n \"start\": \"2022-01-01\",\n \"until\": \"2022-12-31\",\n },\n \"Recurrence monthly by date with date list\": {\n \"type\": \"First day of month excluding 01-Jan, including 02-Jan\",\n \"frequency\": \"MONTHLY\",\n \"interval\": 1,\n \"start\": \"2022-01-01\",\n \"until\": \"2022-12-31\",\n \"excludes\": [\"2022-01-01\"],\n \"dates\": [\"2022-01-02\"],\n },\n \"Recurrence with weekday dict (day + byweekday)\": {\n \"type\": \"First Monday and second Tuesday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": {\"MO\": 1, \"TU\": 2},\n },\n \"Recurrence with first Saturday of the month\": {\n \"type\": \"First Saturday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": \"SA\",\n },\n \"Recurrence with last Saturday of the month\": {\n \"type\": \"Last Saturday of the month\",\n \"frequency\": \"MONTHLY\",\n \"start\": \"2022-01-01\",\n \"weekdays\": {\"SA\": -1},\n },\n \"Recurrence weekly specified by weekday\": {\n \"type\": \"Every Friday\",\n \"frequency\": \"WEEKLY\",\n \"weekdays\": \"FR\",\n },\n}\n\nFREQNAMES = [\"YEARLY\", \"MONTHLY\", \"WEEKLY\", \"DAILY\"]\nWEEKDAY_MAP = {\"MO\": MO, \"TU\": TU, \"WE\": WE, \"TH\": TH, \"FR\": FR, \"SA\": SA, \"SU\": SU}\n\n\nclass Source:\n def __init__(\n self,\n type: str,\n dates: list[str] | None = None,\n frequency: str | None = None,\n interval: int = 1,\n start: str | None = None,\n until: str | None = None,\n count: int | None = None,\n excludes: list[str] | None = None,\n weekdays: list[str | int]\n | dict[str | int, int | str | None]\n | str\n | None = None,\n ):\n self._weekdays: list[weekday] | None = None\n if weekdays is not None:\n self._weekdays = []\n if isinstance(weekdays, dict | OrderedDict):\n [\n self.add_weekday(\n weekday,\n int(number) if number is not None and number != \"\" else 1,\n )\n for weekday, number in weekdays.items()\n ]\n\n elif isinstance(weekdays, str):\n 
self.add_weekday(weekdays, 1)\n\n else:\n raise Exception(f\"Invalid weekdays format: {weekdays}\")\n\n if self._weekdays == []:\n self._weekdays = None\n\n self._type = type\n self._dates = [parser.isoparse(d).date() for d in dates or []]\n\n self._recurrence = FREQNAMES.index(frequency) if frequency is not None else None\n self._interval = interval\n self._start = parser.isoparse(start).date() if start else None\n if until:\n self._until: datetime.date | None = parser.isoparse(until).date()\n self._count = None\n else:\n self._until = None\n self._count = count if count else 10\n self._excludes = [parser.isoparse(d).date() for d in excludes or []]\n\n def add_weekday(self, weekday, count: int):\n if self._weekdays is None:\n raise ValueError(\"Internal Error: weekdays not initialized\")\n\n if weekday not in WEEKDAY_MAP:\n raise Exception(f\"invalid weekday: {weekday}\")\n\n self._weekdays.append(WEEKDAY_MAP[weekday](count))\n\n def fetch(self):\n dates = []\n\n if self._recurrence is not None:\n ruledates = rrule(\n freq=self._recurrence,\n interval=self._interval,\n dtstart=self._start,\n until=self._until,\n count=self._count,\n byweekday=self._weekdays,\n )\n\n for ruleentry in ruledates:\n date = ruleentry.date()\n\n if self._excludes is not None and date in self._excludes:\n continue\n\n dates.append(date)\n\n if self._dates is not None:\n dates.extend(self._dates)\n\n dates.sort()\n\n entries = [Collection(date, self._type) for date in set(dates)]\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/static.py"}]} | 2,845 | 136 |
gh_patches_debug_8033 | rasdani/github-patches | git_diff | arviz-devs__arviz-1659 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
from_pyro fails when MCMC is run on GPU
When MCMC is performed on a GPU, from_pyro fails, as the sites returned by
`trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)` can be tensors that still live on the GPU.
This will cause this line to break:
https://github.com/arviz-devs/arviz/blob/1f3b8fd0eaafcf58aad09a80d309b2a4b40dd191/arviz/data/io_pyro.py#L231
Since there is currently no easy way of transferring a Pyro MCMC object to the CPU, a workaround is adding a `.cpu()` to this line:
https://github.com/arviz-devs/arviz/blob/1f3b8fd0eaafcf58aad09a80d309b2a4b40dd191/arviz/data/io_pyro.py#L111
--- END ISSUE ---
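
(Editorial note: a minimal, runnable sketch of the workaround the reporter describes: move each observed site's tensor to the CPU before it reaches NumPy/xarray. The toy model below is an assumption for illustration; the golden diff further down applies the same `.cpu()` call inside `PyroConverter.__init__`.)

```python
# Illustration only: observed-site tensors pulled back to the CPU.
import pyro
import pyro.distributions as dist
import torch

def model(data):
    loc = pyro.sample("loc", dist.Normal(0.0, 1.0))
    pyro.sample("obs", dist.Normal(loc, 1.0), obs=data)

data = torch.zeros(3)  # on a GPU run this tensor would live on the GPU
trace = pyro.poutine.trace(model).get_trace(data)
observations = {
    name: site["value"].cpu()  # no-op when the tensor is already on the CPU
    for name, site in trace.nodes.items()
    if site["type"] == "sample" and site["is_observed"]
}
print(observations)  # {'obs': tensor([0., 0., 0.])}
```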
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/data/io_pyro.py`
Content:
```
1 """Pyro-specific conversion code."""
2 import logging
3 from typing import Callable, Optional
4 import warnings
5
6 import numpy as np
7 from packaging import version
8
9 from .. import utils
10 from ..rcparams import rcParams
11 from .base import dict_to_dataset, requires
12 from .inference_data import InferenceData
13
14 _log = logging.getLogger(__name__)
15
16
17 class PyroConverter:
18 """Encapsulate Pyro specific logic."""
19
20 # pylint: disable=too-many-instance-attributes
21
22 model = None # type: Optional[Callable]
23 nchains = None # type: int
24 ndraws = None # type: int
25
26 def __init__(
27 self,
28 *,
29 posterior=None,
30 prior=None,
31 posterior_predictive=None,
32 log_likelihood=None,
33 predictions=None,
34 constant_data=None,
35 predictions_constant_data=None,
36 coords=None,
37 dims=None,
38 pred_dims=None,
39 num_chains=1,
40 ):
41 """Convert Pyro data into an InferenceData object.
42
43 Parameters
44 ----------
45 posterior : pyro.infer.MCMC
46 Fitted MCMC object from Pyro
47 prior: dict
48 Prior samples from a Pyro model
49 posterior_predictive : dict
50 Posterior predictive samples for the posterior
51 predictions: dict
52 Out of sample predictions
53 constant_data: dict
54 Dictionary containing constant data variables mapped to their values.
55 predictions_constant_data: dict
56 Constant data used for out-of-sample predictions.
57 coords : dict[str] -> list[str]
58 Map of dimensions to coordinates
59 dims : dict[str] -> list[str]
60 Map variable names to their coordinates
61 pred_dims: dict
62 Dims for predictions data. Map variable names to their coordinates.
63 num_chains: int
64 Number of chains used for sampling. Ignored if posterior is present.
65 """
66 self.posterior = posterior
67 self.prior = prior
68 self.posterior_predictive = posterior_predictive
69 self.log_likelihood = (
70 rcParams["data.log_likelihood"] if log_likelihood is None else log_likelihood
71 )
72 self.predictions = predictions
73 self.constant_data = constant_data
74 self.predictions_constant_data = predictions_constant_data
75 self.coords = coords
76 self.dims = {} if dims is None else dims
77 self.pred_dims = {} if pred_dims is None else pred_dims
78 import pyro
79
80 def arbitrary_element(dct):
81 return next(iter(dct.values()))
82
83 self.pyro = pyro
84 if posterior is not None:
85 self.nchains, self.ndraws = posterior.num_chains, posterior.num_samples
86 if version.parse(pyro.__version__) >= version.parse("1.0.0"):
87 self.model = self.posterior.kernel.model
88 # model arguments and keyword arguments
89 self._args = self.posterior._args # pylint: disable=protected-access
90 self._kwargs = self.posterior._kwargs # pylint: disable=protected-access
91 else:
92 self.nchains = num_chains
93 get_from = None
94 if predictions is not None:
95 get_from = predictions
96 elif posterior_predictive is not None:
97 get_from = posterior_predictive
98 elif prior is not None:
99 get_from = prior
100 if get_from is None and constant_data is None and predictions_constant_data is None:
101 raise ValueError(
102 "When constructing InferenceData must have at least"
103 " one of posterior, prior, posterior_predictive or predictions."
104 )
105 if get_from is not None:
106 aelem = arbitrary_element(get_from)
107 self.ndraws = aelem.shape[0] // self.nchains
108
109 observations = {}
110 if self.model is not None:
111 trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)
112 observations = {
113 name: site["value"]
114 for name, site in trace.nodes.items()
115 if site["type"] == "sample" and site["is_observed"]
116 }
117 self.observations = observations if observations else None
118
119 @requires("posterior")
120 def posterior_to_xarray(self):
121 """Convert the posterior to an xarray dataset."""
122 data = self.posterior.get_samples(group_by_chain=True)
123 data = {k: v.detach().cpu().numpy() for k, v in data.items()}
124 return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=self.dims)
125
126 @requires("posterior")
127 def sample_stats_to_xarray(self):
128 """Extract sample_stats from Pyro posterior."""
129 divergences = self.posterior.diagnostics()["divergences"]
130 diverging = np.zeros((self.nchains, self.ndraws), dtype=np.bool)
131 for i, k in enumerate(sorted(divergences)):
132 diverging[i, divergences[k]] = True
133 data = {"diverging": diverging}
134 return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=None)
135
136 @requires("posterior")
137 @requires("model")
138 def log_likelihood_to_xarray(self):
139 """Extract log likelihood from Pyro posterior."""
140 if not self.log_likelihood:
141 return None
142 data = {}
143 if self.observations is not None:
144 try:
145 samples = self.posterior.get_samples(group_by_chain=False)
146 predictive = self.pyro.infer.Predictive(self.model, samples)
147 vectorized_trace = predictive.get_vectorized_trace(*self._args, **self._kwargs)
148 for obs_name in self.observations.keys():
149 obs_site = vectorized_trace.nodes[obs_name]
150 log_like = obs_site["fn"].log_prob(obs_site["value"]).detach().cpu().numpy()
151 shape = (self.nchains, self.ndraws) + log_like.shape[1:]
152 data[obs_name] = np.reshape(log_like, shape)
153 except: # pylint: disable=bare-except
154 # cannot get vectorized trace
155 warnings.warn(
156 "Could not get vectorized trace, log_likelihood group will be omitted. "
157 "Check your model vectorization or set log_likelihood=False"
158 )
159 return None
160 return dict_to_dataset(
161 data, library=self.pyro, coords=self.coords, dims=self.dims, skip_event_dims=True
162 )
163
164 def translate_posterior_predictive_dict_to_xarray(self, dct, dims):
165 """Convert posterior_predictive or prediction samples to xarray."""
166 data = {}
167 for k, ary in dct.items():
168 ary = ary.detach().cpu().numpy()
169 shape = ary.shape
170 if shape[0] == self.nchains and shape[1] == self.ndraws:
171 data[k] = ary
172 elif shape[0] == self.nchains * self.ndraws:
173 data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))
174 else:
175 data[k] = utils.expand_dims(ary)
176 _log.warning(
177 "posterior predictive shape not compatible with number of chains and draws."
178 "This can mean that some draws or even whole chains are not represented."
179 )
180 return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=dims)
181
182 @requires("posterior_predictive")
183 def posterior_predictive_to_xarray(self):
184 """Convert posterior_predictive samples to xarray."""
185 return self.translate_posterior_predictive_dict_to_xarray(
186 self.posterior_predictive, self.dims
187 )
188
189 @requires("predictions")
190 def predictions_to_xarray(self):
191 """Convert predictions to xarray."""
192 return self.translate_posterior_predictive_dict_to_xarray(self.predictions, self.pred_dims)
193
194 def priors_to_xarray(self):
195 """Convert prior samples (and if possible prior predictive too) to xarray."""
196 if self.prior is None:
197 return {"prior": None, "prior_predictive": None}
198 if self.posterior is not None:
199 prior_vars = list(self.posterior.get_samples().keys())
200 prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]
201 else:
202 prior_vars = self.prior.keys()
203 prior_predictive_vars = None
204 priors_dict = {}
205 for group, var_names in zip(
206 ("prior", "prior_predictive"), (prior_vars, prior_predictive_vars)
207 ):
208 priors_dict[group] = (
209 None
210 if var_names is None
211 else dict_to_dataset(
212 {
213 k: utils.expand_dims(np.squeeze(self.prior[k].detach().cpu().numpy()))
214 for k in var_names
215 },
216 library=self.pyro,
217 coords=self.coords,
218 dims=self.dims,
219 )
220 )
221 return priors_dict
222
223 @requires("observations")
224 @requires("model")
225 def observed_data_to_xarray(self):
226 """Convert observed data to xarray."""
227 if self.dims is None:
228 dims = {}
229 else:
230 dims = self.dims
231 return dict_to_dataset(
232 self.observations, library=self.pyro, coords=self.coords, dims=dims, default_dims=[]
233 )
234
235 @requires("constant_data")
236 def constant_data_to_xarray(self):
237 """Convert constant_data to xarray."""
238 return dict_to_dataset(
239 self.constant_data,
240 library=self.pyro,
241 coords=self.coords,
242 dims=self.dims,
243 default_dims=[],
244 )
245
246 @requires("predictions_constant_data")
247 def predictions_constant_data_to_xarray(self):
248 """Convert predictions_constant_data to xarray."""
249 return dict_to_dataset(
250 self.predictions_constant_data,
251 library=self.pyro,
252 coords=self.coords,
253 dims=self.pred_dims,
254 default_dims=[],
255 )
256
257 def to_inference_data(self):
258 """Convert all available data to an InferenceData object."""
259 return InferenceData(
260 **{
261 "posterior": self.posterior_to_xarray(),
262 "sample_stats": self.sample_stats_to_xarray(),
263 "log_likelihood": self.log_likelihood_to_xarray(),
264 "posterior_predictive": self.posterior_predictive_to_xarray(),
265 "predictions": self.predictions_to_xarray(),
266 "constant_data": self.constant_data_to_xarray(),
267 "predictions_constant_data": self.predictions_constant_data_to_xarray(),
268 **self.priors_to_xarray(),
269 "observed_data": self.observed_data_to_xarray(),
270 }
271 )
272
273
274 def from_pyro(
275 posterior=None,
276 *,
277 prior=None,
278 posterior_predictive=None,
279 log_likelihood=None,
280 predictions=None,
281 constant_data=None,
282 predictions_constant_data=None,
283 coords=None,
284 dims=None,
285 pred_dims=None,
286 num_chains=1,
287 ):
288 """Convert Pyro data into an InferenceData object.
289
290 For a usage example read the
291 :ref:`Creating InferenceData section on from_pyro <creating_InferenceData>`
292
293
294 Parameters
295 ----------
296 posterior : pyro.infer.MCMC
297 Fitted MCMC object from Pyro
298 prior: dict
299 Prior samples from a Pyro model
300 posterior_predictive : dict
301 Posterior predictive samples for the posterior
302 log_likelihood : bool, optional
303 Calculate and store pointwise log likelihood values. Defaults to the value
304 of rcParam ``data.log_likelihood``.
305 predictions: dict
306 Out of sample predictions
307 constant_data: dict
308 Dictionary containing constant data variables mapped to their values.
309 predictions_constant_data: dict
310 Constant data used for out-of-sample predictions.
311 coords : dict[str] -> list[str]
312 Map of dimensions to coordinates
313 dims : dict[str] -> list[str]
314 Map variable names to their coordinates
315 pred_dims: dict
316 Dims for predictions data. Map variable names to their coordinates.
317 num_chains: int
318 Number of chains used for sampling. Ignored if posterior is present.
319 """
320 return PyroConverter(
321 posterior=posterior,
322 prior=prior,
323 posterior_predictive=posterior_predictive,
324 log_likelihood=log_likelihood,
325 predictions=predictions,
326 constant_data=constant_data,
327 predictions_constant_data=predictions_constant_data,
328 coords=coords,
329 dims=dims,
330 pred_dims=pred_dims,
331 num_chains=num_chains,
332 ).to_inference_data()
333
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/data/io_pyro.py b/arviz/data/io_pyro.py
--- a/arviz/data/io_pyro.py
+++ b/arviz/data/io_pyro.py
@@ -110,7 +110,7 @@
if self.model is not None:
trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)
observations = {
- name: site["value"]
+ name: site["value"].cpu()
for name, site in trace.nodes.items()
if site["type"] == "sample" and site["is_observed"]
}
| {"golden_diff": "diff --git a/arviz/data/io_pyro.py b/arviz/data/io_pyro.py\n--- a/arviz/data/io_pyro.py\n+++ b/arviz/data/io_pyro.py\n@@ -110,7 +110,7 @@\n if self.model is not None:\n trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)\n observations = {\n- name: site[\"value\"]\n+ name: site[\"value\"].cpu()\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\" and site[\"is_observed\"]\n }\n", "issue": "from_pyro fails when MCMC is run on GPU\nWhen MCMC is performed on a GPU, from_pyro fails, as the sites returned by\r\n`trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)` can be tensors that still live on the GPU. \r\nThis will cause this line to break: \r\nhttps://github.com/arviz-devs/arviz/blob/1f3b8fd0eaafcf58aad09a80d309b2a4b40dd191/arviz/data/io_pyro.py#L231\r\n\r\nSince there is currently no easy way of transferring a pyro MCMC object to the cpu, a workaround is adding a `.cpu()`to this line:\r\nhttps://github.com/arviz-devs/arviz/blob/1f3b8fd0eaafcf58aad09a80d309b2a4b40dd191/arviz/data/io_pyro.py#L111\n", "before_files": [{"content": "\"\"\"Pyro-specific conversion code.\"\"\"\nimport logging\nfrom typing import Callable, Optional\nimport warnings\n\nimport numpy as np\nfrom packaging import version\n\nfrom .. import utils\nfrom ..rcparams import rcParams\nfrom .base import dict_to_dataset, requires\nfrom .inference_data import InferenceData\n\n_log = logging.getLogger(__name__)\n\n\nclass PyroConverter:\n \"\"\"Encapsulate Pyro specific logic.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n model = None # type: Optional[Callable]\n nchains = None # type: int\n ndraws = None # type: int\n\n def __init__(\n self,\n *,\n posterior=None,\n prior=None,\n posterior_predictive=None,\n log_likelihood=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1,\n ):\n \"\"\"Convert Pyro data into an InferenceData object.\n\n Parameters\n ----------\n posterior : pyro.infer.MCMC\n Fitted MCMC object from Pyro\n prior: dict\n Prior samples from a Pyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. 
Ignored if posterior is present.\n \"\"\"\n self.posterior = posterior\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.log_likelihood = (\n rcParams[\"data.log_likelihood\"] if log_likelihood is None else log_likelihood\n )\n self.predictions = predictions\n self.constant_data = constant_data\n self.predictions_constant_data = predictions_constant_data\n self.coords = coords\n self.dims = {} if dims is None else dims\n self.pred_dims = {} if pred_dims is None else pred_dims\n import pyro\n\n def arbitrary_element(dct):\n return next(iter(dct.values()))\n\n self.pyro = pyro\n if posterior is not None:\n self.nchains, self.ndraws = posterior.num_chains, posterior.num_samples\n if version.parse(pyro.__version__) >= version.parse(\"1.0.0\"):\n self.model = self.posterior.kernel.model\n # model arguments and keyword arguments\n self._args = self.posterior._args # pylint: disable=protected-access\n self._kwargs = self.posterior._kwargs # pylint: disable=protected-access\n else:\n self.nchains = num_chains\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None and constant_data is None and predictions_constant_data is None:\n raise ValueError(\n \"When constructing InferenceData must have at least\"\n \" one of posterior, prior, posterior_predictive or predictions.\"\n )\n if get_from is not None:\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0] // self.nchains\n\n observations = {}\n if self.model is not None:\n trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)\n observations = {\n name: site[\"value\"]\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\" and site[\"is_observed\"]\n }\n self.observations = observations if observations else None\n\n @requires(\"posterior\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = self.posterior.get_samples(group_by_chain=True)\n data = {k: v.detach().cpu().numpy() for k, v in data.items()}\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=self.dims)\n\n @requires(\"posterior\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from Pyro posterior.\"\"\"\n divergences = self.posterior.diagnostics()[\"divergences\"]\n diverging = np.zeros((self.nchains, self.ndraws), dtype=np.bool)\n for i, k in enumerate(sorted(divergences)):\n diverging[i, divergences[k]] = True\n data = {\"diverging\": diverging}\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=None)\n\n @requires(\"posterior\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood from Pyro posterior.\"\"\"\n if not self.log_likelihood:\n return None\n data = {}\n if self.observations is not None:\n try:\n samples = self.posterior.get_samples(group_by_chain=False)\n predictive = self.pyro.infer.Predictive(self.model, samples)\n vectorized_trace = predictive.get_vectorized_trace(*self._args, **self._kwargs)\n for obs_name in self.observations.keys():\n obs_site = vectorized_trace.nodes[obs_name]\n log_like = obs_site[\"fn\"].log_prob(obs_site[\"value\"]).detach().cpu().numpy()\n shape = (self.nchains, self.ndraws) + log_like.shape[1:]\n data[obs_name] = np.reshape(log_like, shape)\n except: # pylint: disable=bare-except\n # cannot get vectorized trace\n warnings.warn(\n \"Could not get vectorized 
trace, log_likelihood group will be omitted. \"\n \"Check your model vectorization or set log_likelihood=False\"\n )\n return None\n return dict_to_dataset(\n data, library=self.pyro, coords=self.coords, dims=self.dims, skip_event_dims=True\n )\n\n def translate_posterior_predictive_dict_to_xarray(self, dct, dims):\n \"\"\"Convert posterior_predictive or prediction samples to xarray.\"\"\"\n data = {}\n for k, ary in dct.items():\n ary = ary.detach().cpu().numpy()\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n _log.warning(\n \"posterior predictive shape not compatible with number of chains and draws.\"\n \"This can mean that some draws or even whole chains are not represented.\"\n )\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=dims)\n\n @requires(\"posterior_predictive\")\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(\n self.posterior_predictive, self.dims\n )\n\n @requires(\"predictions\")\n def predictions_to_xarray(self):\n \"\"\"Convert predictions to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions, self.pred_dims)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.posterior is not None:\n prior_vars = list(self.posterior.get_samples().keys())\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = self.prior.keys()\n prior_predictive_vars = None\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {\n k: utils.expand_dims(np.squeeze(self.prior[k].detach().cpu().numpy()))\n for k in var_names\n },\n library=self.pyro,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n return dict_to_dataset(\n self.observations, library=self.pyro, coords=self.coords, dims=dims, default_dims=[]\n )\n\n @requires(\"constant_data\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.constant_data,\n library=self.pyro,\n coords=self.coords,\n dims=self.dims,\n default_dims=[],\n )\n\n @requires(\"predictions_constant_data\")\n def predictions_constant_data_to_xarray(self):\n \"\"\"Convert predictions_constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.predictions_constant_data,\n library=self.pyro,\n coords=self.coords,\n dims=self.pred_dims,\n default_dims=[],\n )\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n 
\"constant_data\": self.constant_data_to_xarray(),\n \"predictions_constant_data\": self.predictions_constant_data_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n )\n\n\ndef from_pyro(\n posterior=None,\n *,\n prior=None,\n posterior_predictive=None,\n log_likelihood=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1,\n):\n \"\"\"Convert Pyro data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_pyro <creating_InferenceData>`\n\n\n Parameters\n ----------\n posterior : pyro.infer.MCMC\n Fitted MCMC object from Pyro\n prior: dict\n Prior samples from a Pyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n log_likelihood : bool, optional\n Calculate and store pointwise log likelihood values. Defaults to the value\n of rcParam ``data.log_likelihood``.\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. Ignored if posterior is present.\n \"\"\"\n return PyroConverter(\n posterior=posterior,\n prior=prior,\n posterior_predictive=posterior_predictive,\n log_likelihood=log_likelihood,\n predictions=predictions,\n constant_data=constant_data,\n predictions_constant_data=predictions_constant_data,\n coords=coords,\n dims=dims,\n pred_dims=pred_dims,\n num_chains=num_chains,\n ).to_inference_data()\n", "path": "arviz/data/io_pyro.py"}], "after_files": [{"content": "\"\"\"Pyro-specific conversion code.\"\"\"\nimport logging\nfrom typing import Callable, Optional\nimport warnings\n\nimport numpy as np\nfrom packaging import version\n\nfrom .. import utils\nfrom ..rcparams import rcParams\nfrom .base import dict_to_dataset, requires\nfrom .inference_data import InferenceData\n\n_log = logging.getLogger(__name__)\n\n\nclass PyroConverter:\n \"\"\"Encapsulate Pyro specific logic.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n model = None # type: Optional[Callable]\n nchains = None # type: int\n ndraws = None # type: int\n\n def __init__(\n self,\n *,\n posterior=None,\n prior=None,\n posterior_predictive=None,\n log_likelihood=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1,\n ):\n \"\"\"Convert Pyro data into an InferenceData object.\n\n Parameters\n ----------\n posterior : pyro.infer.MCMC\n Fitted MCMC object from Pyro\n prior: dict\n Prior samples from a Pyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. 
Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. Ignored if posterior is present.\n \"\"\"\n self.posterior = posterior\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.log_likelihood = (\n rcParams[\"data.log_likelihood\"] if log_likelihood is None else log_likelihood\n )\n self.predictions = predictions\n self.constant_data = constant_data\n self.predictions_constant_data = predictions_constant_data\n self.coords = coords\n self.dims = {} if dims is None else dims\n self.pred_dims = {} if pred_dims is None else pred_dims\n import pyro\n\n def arbitrary_element(dct):\n return next(iter(dct.values()))\n\n self.pyro = pyro\n if posterior is not None:\n self.nchains, self.ndraws = posterior.num_chains, posterior.num_samples\n if version.parse(pyro.__version__) >= version.parse(\"1.0.0\"):\n self.model = self.posterior.kernel.model\n # model arguments and keyword arguments\n self._args = self.posterior._args # pylint: disable=protected-access\n self._kwargs = self.posterior._kwargs # pylint: disable=protected-access\n else:\n self.nchains = num_chains\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None and constant_data is None and predictions_constant_data is None:\n raise ValueError(\n \"When constructing InferenceData must have at least\"\n \" one of posterior, prior, posterior_predictive or predictions.\"\n )\n if get_from is not None:\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0] // self.nchains\n\n observations = {}\n if self.model is not None:\n trace = pyro.poutine.trace(self.model).get_trace(*self._args, **self._kwargs)\n observations = {\n name: site[\"value\"].cpu()\n for name, site in trace.nodes.items()\n if site[\"type\"] == \"sample\" and site[\"is_observed\"]\n }\n self.observations = observations if observations else None\n\n @requires(\"posterior\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = self.posterior.get_samples(group_by_chain=True)\n data = {k: v.detach().cpu().numpy() for k, v in data.items()}\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=self.dims)\n\n @requires(\"posterior\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from Pyro posterior.\"\"\"\n divergences = self.posterior.diagnostics()[\"divergences\"]\n diverging = np.zeros((self.nchains, self.ndraws), dtype=np.bool)\n for i, k in enumerate(sorted(divergences)):\n diverging[i, divergences[k]] = True\n data = {\"diverging\": diverging}\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=None)\n\n @requires(\"posterior\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood from Pyro posterior.\"\"\"\n if not self.log_likelihood:\n return None\n data = {}\n if self.observations is not None:\n try:\n samples = self.posterior.get_samples(group_by_chain=False)\n predictive = self.pyro.infer.Predictive(self.model, samples)\n vectorized_trace = predictive.get_vectorized_trace(*self._args, **self._kwargs)\n for obs_name in self.observations.keys():\n obs_site = vectorized_trace.nodes[obs_name]\n log_like = obs_site[\"fn\"].log_prob(obs_site[\"value\"]).detach().cpu().numpy()\n shape = (self.nchains, self.ndraws) + log_like.shape[1:]\n data[obs_name] = np.reshape(log_like, shape)\n except: # 
pylint: disable=bare-except\n # cannot get vectorized trace\n warnings.warn(\n \"Could not get vectorized trace, log_likelihood group will be omitted. \"\n \"Check your model vectorization or set log_likelihood=False\"\n )\n return None\n return dict_to_dataset(\n data, library=self.pyro, coords=self.coords, dims=self.dims, skip_event_dims=True\n )\n\n def translate_posterior_predictive_dict_to_xarray(self, dct, dims):\n \"\"\"Convert posterior_predictive or prediction samples to xarray.\"\"\"\n data = {}\n for k, ary in dct.items():\n ary = ary.detach().cpu().numpy()\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n _log.warning(\n \"posterior predictive shape not compatible with number of chains and draws.\"\n \"This can mean that some draws or even whole chains are not represented.\"\n )\n return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=dims)\n\n @requires(\"posterior_predictive\")\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(\n self.posterior_predictive, self.dims\n )\n\n @requires(\"predictions\")\n def predictions_to_xarray(self):\n \"\"\"Convert predictions to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions, self.pred_dims)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.posterior is not None:\n prior_vars = list(self.posterior.get_samples().keys())\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = self.prior.keys()\n prior_predictive_vars = None\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {\n k: utils.expand_dims(np.squeeze(self.prior[k].detach().cpu().numpy()))\n for k in var_names\n },\n library=self.pyro,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n return dict_to_dataset(\n self.observations, library=self.pyro, coords=self.coords, dims=dims, default_dims=[]\n )\n\n @requires(\"constant_data\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.constant_data,\n library=self.pyro,\n coords=self.coords,\n dims=self.dims,\n default_dims=[],\n )\n\n @requires(\"predictions_constant_data\")\n def predictions_constant_data_to_xarray(self):\n \"\"\"Convert predictions_constant_data to xarray.\"\"\"\n return dict_to_dataset(\n self.predictions_constant_data,\n library=self.pyro,\n coords=self.coords,\n dims=self.pred_dims,\n default_dims=[],\n )\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n 
\"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n \"constant_data\": self.constant_data_to_xarray(),\n \"predictions_constant_data\": self.predictions_constant_data_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n )\n\n\ndef from_pyro(\n posterior=None,\n *,\n prior=None,\n posterior_predictive=None,\n log_likelihood=None,\n predictions=None,\n constant_data=None,\n predictions_constant_data=None,\n coords=None,\n dims=None,\n pred_dims=None,\n num_chains=1,\n):\n \"\"\"Convert Pyro data into an InferenceData object.\n\n For a usage example read the\n :ref:`Creating InferenceData section on from_pyro <creating_InferenceData>`\n\n\n Parameters\n ----------\n posterior : pyro.infer.MCMC\n Fitted MCMC object from Pyro\n prior: dict\n Prior samples from a Pyro model\n posterior_predictive : dict\n Posterior predictive samples for the posterior\n log_likelihood : bool, optional\n Calculate and store pointwise log likelihood values. Defaults to the value\n of rcParam ``data.log_likelihood``.\n predictions: dict\n Out of sample predictions\n constant_data: dict\n Dictionary containing constant data variables mapped to their values.\n predictions_constant_data: dict\n Constant data used for out-of-sample predictions.\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n pred_dims: dict\n Dims for predictions data. Map variable names to their coordinates.\n num_chains: int\n Number of chains used for sampling. Ignored if posterior is present.\n \"\"\"\n return PyroConverter(\n posterior=posterior,\n prior=prior,\n posterior_predictive=posterior_predictive,\n log_likelihood=log_likelihood,\n predictions=predictions,\n constant_data=constant_data,\n predictions_constant_data=predictions_constant_data,\n coords=coords,\n dims=dims,\n pred_dims=pred_dims,\n num_chains=num_chains,\n ).to_inference_data()\n", "path": "arviz/data/io_pyro.py"}]} | 4,021 | 136 |
gh_patches_debug_6579 | rasdani/github-patches | git_diff | liqd__a4-opin-1944 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Initiators gets Error 403 when deleting Offline Events
**URL:https://opin.me/de/dashboard/offlineevents/offline-test/delete/
https://opin.me/de/projects/magda-testet-brainstorming/**
**user: Initiator**
**expected behaviour: As an Initiator I want to delete an Offline Event and then it is deleted**
**behaviour:As an Initiator I get an Error 403 if I click on delete in the Project dashboard**
**important screensize:**
**device & browser: Firefox, Chrome, Safarie**
**Comment/Question:**
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/offlinephases/views.py`
Content:
```
1 from django.contrib import messages
2 from django.db import transaction
3 from django.shortcuts import redirect, render
4 from django.urls import reverse
5 from django.utils.translation import ugettext_lazy as _
6 from django.views import generic
7 from rules.contrib.views import PermissionRequiredMixin
8
9 from adhocracy4.dashboard import mixins
10 from adhocracy4.projects.mixins import ProjectMixin
11
12 from . import forms, models
13 from .mixins import OfflineEventFormMixin
14
15
16 class OfflineEventDetailView(PermissionRequiredMixin,
17 generic.DetailView):
18 model = models.OfflineEvent
19 permission_required = 'euth_offlinephases.view_offlineevent'
20
21 @property
22 def project(self):
23 return self.object.project
24
25
26 class OfflineEventListView(ProjectMixin,
27 mixins.DashboardBaseMixin,
28 mixins.DashboardComponentMixin,
29 generic.ListView):
30
31 model = models.OfflineEvent
32 template_name = 'euth_offlinephases/offlineevent_list.html'
33 permission_required = 'a4projects.change_project'
34
35 def get_queryset(self):
36 return super().get_queryset().filter(project=self.project)
37
38 def get_permission_object(self):
39 return self.project
40
41
42 class OfflineEventCreateView(
43 ProjectMixin,
44 mixins.DashboardBaseMixin,
45 mixins.DashboardComponentMixin,
46 generic.TemplateView,
47 OfflineEventFormMixin
48 ):
49 template_name = 'euth_offlinephases/offlineevent_form.html'
50 permission_required = 'a4projects.change_project'
51 project_url_kwarg = 'project_slug'
52
53 def get_permission_object(self):
54 return self.project
55
56 def get_success_url(self):
57 return reverse(
58 'a4dashboard:offlineevent-list',
59 kwargs={'project_slug': self.project.slug})
60
61 def get_context_data(self, form=None, upload_forms=None, **kwargs):
62 context = super().get_context_data(**kwargs)
63 if not form:
64 form = forms.OfflineEventForm()
65 if not upload_forms:
66 upload_forms = self.empty_upload_formset()
67 context['form'] = form
68 context['upload_forms'] = upload_forms
69 return context
70
71 def _process_formdata(self, form, upload_forms):
72 form.instance.project = self.project
73 with transaction.atomic():
74 object = form.save()
75 intstances = upload_forms.save(commit=False)
76 for instance in intstances:
77 instance.offlineevent = object
78 instance.save()
79
80 def post(self, request, *args, **kwargs):
81 form = forms.OfflineEventForm(request.POST)
82 upload_forms = self.filled_upload_formset(request)
83 if form.is_valid() and upload_forms.is_valid():
84 self._process_formdata(form, upload_forms)
85 messages.add_message(request,
86 messages.SUCCESS,
87 _('Offline events '
88 'have been updated'))
89 response = redirect(self.get_success_url())
90 else:
91 response = render(request,
92 self.template_name,
93 self.get_context_data(form=form,
94 upload_forms=upload_forms))
95 return response
96
97
98 class OfflineEventUpdateView(ProjectMixin,
99 mixins.DashboardBaseMixin,
100 mixins.DashboardComponentMixin,
101 generic.detail.SingleObjectMixin,
102 generic.TemplateView,
103 OfflineEventFormMixin):
104
105 model = models.OfflineEvent
106 permission_required = 'a4projects.change_project'
107 template_name = 'euth_offlinephases/offlineevent_form.html'
108 get_context_from_object = True
109
110 def dispatch(self, *args, **kwargs):
111 self.object = self.get_object()
112 return super().dispatch(*args, **kwargs)
113
114 def get_context_data(self, form=None, upload_forms=None, **kwargs):
115 context = super().get_context_data(**kwargs)
116 if not form:
117 form = forms.OfflineEventForm(instance=self.get_object())
118 if not upload_forms:
119 queryset = \
120 models.OfflineEventFileUpload\
121 .objects.filter(offlineevent=self.get_object())
122 upload_forms = self.update_upload_formset(queryset)
123 context['form'] = form
124 context['upload_forms'] = upload_forms
125 return context
126
127 def get_success_url(self):
128 return reverse(
129 'a4dashboard:offlineevent-list',
130 kwargs={'project_slug': self.project.slug})
131
132 def get_permission_object(self):
133 return self.project
134
135 def _process_formdata(self, form, upload_forms):
136 with transaction.atomic():
137 form.save()
138 intstances = upload_forms.save(commit=False)
139 for obj in upload_forms.deleted_objects:
140 obj.delete()
141 for instance in intstances:
142 instance.offlineevent = self.object
143 instance.save()
144
145 def post(self, request, *args, **kwargs):
146 upload_forms = self.filled_upload_formset(request)
147 form = forms.OfflineEventForm(request.POST, instance=self.object)
148 if upload_forms.is_valid() and form.is_valid():
149 self._process_formdata(form, upload_forms)
150 messages.add_message(request,
151 messages.SUCCESS,
152 _('Offline events '
153 'have been updated'))
154 response = redirect(self.get_success_url())
155 else:
156 response = render(request,
157 self.template_name,
158 self.get_context_data(
159 form=form, upload_forms=upload_forms))
160 return response
161
162
163 class OfflineEventDeleteView(ProjectMixin,
164 mixins.DashboardBaseMixin,
165 mixins.DashboardComponentMixin,
166 mixins.DashboardComponentDeleteSignalMixin,
167 generic.DeleteView):
168 model = models.OfflineEvent
169 success_message = _('The offline event has been deleted')
170 permission_required = ''
171 template_name = 'euth_offlinephases/offlineevent_confirm_delete.html'
172 get_context_from_object = True
173
174 def delete(self, request, *args, **kwargs):
175 messages.success(self.request, self.success_message)
176 return super().delete(request, *args, **kwargs)
177
178 def get_success_url(self):
179 return reverse(
180 'a4dashboard:offlineevent-list',
181 kwargs={'project_slug': self.project.slug})
182
183 @property
184 def organisation(self):
185 return self.project.organisation
186
187 def get_permission_object(self):
188 return self.project
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/euth/offlinephases/views.py b/euth/offlinephases/views.py
--- a/euth/offlinephases/views.py
+++ b/euth/offlinephases/views.py
@@ -167,7 +167,7 @@
generic.DeleteView):
model = models.OfflineEvent
success_message = _('The offline event has been deleted')
- permission_required = ''
+ permission_required = 'a4projects.change_project'
template_name = 'euth_offlinephases/offlineevent_confirm_delete.html'
get_context_from_object = True
| {"golden_diff": "diff --git a/euth/offlinephases/views.py b/euth/offlinephases/views.py\n--- a/euth/offlinephases/views.py\n+++ b/euth/offlinephases/views.py\n@@ -167,7 +167,7 @@\n generic.DeleteView):\n model = models.OfflineEvent\n success_message = _('The offline event has been deleted')\n- permission_required = ''\n+ permission_required = 'a4projects.change_project'\n template_name = 'euth_offlinephases/offlineevent_confirm_delete.html'\n get_context_from_object = True\n", "issue": "Initiators gets Error 403 when deleting Offline Events\n**URL:https://opin.me/de/dashboard/offlineevents/offline-test/delete/\r\nhttps://opin.me/de/projects/magda-testet-brainstorming/** \r\n**user: Initiator** \r\n**expected behaviour: As an Initiator I want to delete an Offline Event and then it is deleted** \r\n**behaviour:As an Initiator I get an Error 403 if I click on delete in the Project dashboard** \r\n**important screensize:**\r\n**device & browser: Firefox, Chrome, Safarie** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.db import transaction\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\n\nfrom . import forms, models\nfrom .mixins import OfflineEventFormMixin\n\n\nclass OfflineEventDetailView(PermissionRequiredMixin,\n generic.DetailView):\n model = models.OfflineEvent\n permission_required = 'euth_offlinephases.view_offlineevent'\n\n @property\n def project(self):\n return self.object.project\n\n\nclass OfflineEventListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.ListView):\n\n model = models.OfflineEvent\n template_name = 'euth_offlinephases/offlineevent_list.html'\n permission_required = 'a4projects.change_project'\n\n def get_queryset(self):\n return super().get_queryset().filter(project=self.project)\n\n def get_permission_object(self):\n return self.project\n\n\nclass OfflineEventCreateView(\n ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.TemplateView,\n OfflineEventFormMixin\n):\n template_name = 'euth_offlinephases/offlineevent_form.html'\n permission_required = 'a4projects.change_project'\n project_url_kwarg = 'project_slug'\n\n def get_permission_object(self):\n return self.project\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n def get_context_data(self, form=None, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n if not form:\n form = forms.OfflineEventForm()\n if not upload_forms:\n upload_forms = self.empty_upload_formset()\n context['form'] = form\n context['upload_forms'] = upload_forms\n return context\n\n def _process_formdata(self, form, upload_forms):\n form.instance.project = self.project\n with transaction.atomic():\n object = form.save()\n intstances = upload_forms.save(commit=False)\n for instance in intstances:\n instance.offlineevent = object\n instance.save()\n\n def post(self, request, *args, **kwargs):\n form = forms.OfflineEventForm(request.POST)\n upload_forms = self.filled_upload_formset(request)\n if form.is_valid() and upload_forms.is_valid():\n self._process_formdata(form, upload_forms)\n 
messages.add_message(request,\n messages.SUCCESS,\n _('Offline events '\n 'have been updated'))\n response = redirect(self.get_success_url())\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(form=form,\n upload_forms=upload_forms))\n return response\n\n\nclass OfflineEventUpdateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.detail.SingleObjectMixin,\n generic.TemplateView,\n OfflineEventFormMixin):\n\n model = models.OfflineEvent\n permission_required = 'a4projects.change_project'\n template_name = 'euth_offlinephases/offlineevent_form.html'\n get_context_from_object = True\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, form=None, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n if not form:\n form = forms.OfflineEventForm(instance=self.get_object())\n if not upload_forms:\n queryset = \\\n models.OfflineEventFileUpload\\\n .objects.filter(offlineevent=self.get_object())\n upload_forms = self.update_upload_formset(queryset)\n context['form'] = form\n context['upload_forms'] = upload_forms\n return context\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n def get_permission_object(self):\n return self.project\n\n def _process_formdata(self, form, upload_forms):\n with transaction.atomic():\n form.save()\n intstances = upload_forms.save(commit=False)\n for obj in upload_forms.deleted_objects:\n obj.delete()\n for instance in intstances:\n instance.offlineevent = self.object\n instance.save()\n\n def post(self, request, *args, **kwargs):\n upload_forms = self.filled_upload_formset(request)\n form = forms.OfflineEventForm(request.POST, instance=self.object)\n if upload_forms.is_valid() and form.is_valid():\n self._process_formdata(form, upload_forms)\n messages.add_message(request,\n messages.SUCCESS,\n _('Offline events '\n 'have been updated'))\n response = redirect(self.get_success_url())\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(\n form=form, upload_forms=upload_forms))\n return response\n\n\nclass OfflineEventDeleteView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n mixins.DashboardComponentDeleteSignalMixin,\n generic.DeleteView):\n model = models.OfflineEvent\n success_message = _('The offline event has been deleted')\n permission_required = ''\n template_name = 'euth_offlinephases/offlineevent_confirm_delete.html'\n get_context_from_object = True\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n @property\n def organisation(self):\n return self.project.organisation\n\n def get_permission_object(self):\n return self.project\n", "path": "euth/offlinephases/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.db import transaction\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom rules.contrib.views import PermissionRequiredMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\n\nfrom . 
import forms, models\nfrom .mixins import OfflineEventFormMixin\n\n\nclass OfflineEventDetailView(PermissionRequiredMixin,\n generic.DetailView):\n model = models.OfflineEvent\n permission_required = 'euth_offlinephases.view_offlineevent'\n\n @property\n def project(self):\n return self.object.project\n\n\nclass OfflineEventListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.ListView):\n\n model = models.OfflineEvent\n template_name = 'euth_offlinephases/offlineevent_list.html'\n permission_required = 'a4projects.change_project'\n\n def get_queryset(self):\n return super().get_queryset().filter(project=self.project)\n\n def get_permission_object(self):\n return self.project\n\n\nclass OfflineEventCreateView(\n ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.TemplateView,\n OfflineEventFormMixin\n):\n template_name = 'euth_offlinephases/offlineevent_form.html'\n permission_required = 'a4projects.change_project'\n project_url_kwarg = 'project_slug'\n\n def get_permission_object(self):\n return self.project\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n def get_context_data(self, form=None, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n if not form:\n form = forms.OfflineEventForm()\n if not upload_forms:\n upload_forms = self.empty_upload_formset()\n context['form'] = form\n context['upload_forms'] = upload_forms\n return context\n\n def _process_formdata(self, form, upload_forms):\n form.instance.project = self.project\n with transaction.atomic():\n object = form.save()\n intstances = upload_forms.save(commit=False)\n for instance in intstances:\n instance.offlineevent = object\n instance.save()\n\n def post(self, request, *args, **kwargs):\n form = forms.OfflineEventForm(request.POST)\n upload_forms = self.filled_upload_formset(request)\n if form.is_valid() and upload_forms.is_valid():\n self._process_formdata(form, upload_forms)\n messages.add_message(request,\n messages.SUCCESS,\n _('Offline events '\n 'have been updated'))\n response = redirect(self.get_success_url())\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(form=form,\n upload_forms=upload_forms))\n return response\n\n\nclass OfflineEventUpdateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n generic.detail.SingleObjectMixin,\n generic.TemplateView,\n OfflineEventFormMixin):\n\n model = models.OfflineEvent\n permission_required = 'a4projects.change_project'\n template_name = 'euth_offlinephases/offlineevent_form.html'\n get_context_from_object = True\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, form=None, upload_forms=None, **kwargs):\n context = super().get_context_data(**kwargs)\n if not form:\n form = forms.OfflineEventForm(instance=self.get_object())\n if not upload_forms:\n queryset = \\\n models.OfflineEventFileUpload\\\n .objects.filter(offlineevent=self.get_object())\n upload_forms = self.update_upload_formset(queryset)\n context['form'] = form\n context['upload_forms'] = upload_forms\n return context\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n def get_permission_object(self):\n return self.project\n\n def _process_formdata(self, form, upload_forms):\n with 
transaction.atomic():\n form.save()\n intstances = upload_forms.save(commit=False)\n for obj in upload_forms.deleted_objects:\n obj.delete()\n for instance in intstances:\n instance.offlineevent = self.object\n instance.save()\n\n def post(self, request, *args, **kwargs):\n upload_forms = self.filled_upload_formset(request)\n form = forms.OfflineEventForm(request.POST, instance=self.object)\n if upload_forms.is_valid() and form.is_valid():\n self._process_formdata(form, upload_forms)\n messages.add_message(request,\n messages.SUCCESS,\n _('Offline events '\n 'have been updated'))\n response = redirect(self.get_success_url())\n else:\n response = render(request,\n self.template_name,\n self.get_context_data(\n form=form, upload_forms=upload_forms))\n return response\n\n\nclass OfflineEventDeleteView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.DashboardComponentMixin,\n mixins.DashboardComponentDeleteSignalMixin,\n generic.DeleteView):\n model = models.OfflineEvent\n success_message = _('The offline event has been deleted')\n permission_required = 'a4projects.change_project'\n template_name = 'euth_offlinephases/offlineevent_confirm_delete.html'\n get_context_from_object = True\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'a4dashboard:offlineevent-list',\n kwargs={'project_slug': self.project.slug})\n\n @property\n def organisation(self):\n return self.project.organisation\n\n def get_permission_object(self):\n return self.project\n", "path": "euth/offlinephases/views.py"}]} | 2,138 | 127 |
gh_patches_debug_4153 | rasdani/github-patches | git_diff | svthalia__concrexit-2510 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Thumbnailing of transparent images seems to break
### Describe the bug
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### How to reproduce
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Expected behaviour
Not <img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Screenshots
<img width="1119" alt="image" src="https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png">
### Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/utils/media/services.py`
Content:
```
1 import io
2 import os
3
4 from django.conf import settings
5 from django.core import signing
6 from django.core.files.base import ContentFile
7 from django.core.files.storage import get_storage_class, DefaultStorage
8 from django.core.files.uploadedfile import InMemoryUploadedFile
9 from django.db.models.fields.files import FieldFile, ImageFieldFile
10 from django.urls import reverse
11
12
13 def save_image(storage, image, path, format):
14 buffer = io.BytesIO()
15 image.convert("RGB").save(fp=buffer, format=format)
16 buff_val = buffer.getvalue()
17 content = ContentFile(buff_val)
18 file = InMemoryUploadedFile(
19 content,
20 None,
21 f"foo.{format.lower()}",
22 f"image/{format.lower()}",
23 content.tell,
24 None,
25 )
26 return storage.save(path, file)
27
28
29 def get_media_url(file, attachment=False):
30 """Get the url of the provided media file to serve in a browser.
31
32 If the file is private a signature will be added.
33 Do NOT use this with user input
34 :param file: the file field
35 :param attachment: True if the file is a forced download
36 :return: the url of the media
37 """
38 storage = DefaultStorage()
39 file_name = file
40 if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):
41 storage = file.storage
42 file_name = file.name
43
44 return f"{storage.url(file_name, attachment)}"
45
46
47 def get_thumbnail_url(file, size, fit=True):
48 """Get the thumbnail url of a media file, NEVER use this with user input.
49
50 If the thumbnail exists this function will return the url of the
51 media file, with signature if necessary. Does it not yet exist a route
52 that executes the :func:`utils.media.views.generate_thumbnail`
53 will be the output.
54 :param file: the file field
55 :param size: size of the image
56 :param fit: False to keep the aspect ratio, True to crop
57 :return: get-thumbnail path
58 """
59 storage = DefaultStorage()
60 name = file
61
62 if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):
63 storage = file.storage
64 name = file.name
65
66 is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))
67 size_fit = "{}_{}".format(size, int(fit))
68
69 if name.endswith(".svg") and is_public:
70 return storage.url(name)
71
72 sig_info = {
73 "size": size,
74 "fit": int(fit),
75 "name": name,
76 "thumb_path": f"thumbnails/{size_fit}/{name}",
77 "serve_path": f"thumbnails/{size_fit}/{name}",
78 "storage": f"{storage.__class__.__module__}.{storage.__class__.__name__}",
79 }
80
81 # We provide a URL instead of calling it as a function, so that using
82 # it means kicking off a new GET request. If we would need to check all files for the
83 # thumbnails inline, loading an album overview would have high latency.
84 return (
85 reverse("get-thumbnail", args=[os.path.join(size_fit, sig_info["name"])])
86 + f"?sig={signing.dumps(sig_info)}"
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/utils/media/services.py b/website/utils/media/services.py
--- a/website/utils/media/services.py
+++ b/website/utils/media/services.py
@@ -12,7 +12,7 @@
def save_image(storage, image, path, format):
buffer = io.BytesIO()
- image.convert("RGB").save(fp=buffer, format=format)
+ image.convert("RGB" if format == "JPEG" else "RGBA").save(fp=buffer, format=format)
buff_val = buffer.getvalue()
content = ContentFile(buff_val)
file = InMemoryUploadedFile(
| {"golden_diff": "diff --git a/website/utils/media/services.py b/website/utils/media/services.py\n--- a/website/utils/media/services.py\n+++ b/website/utils/media/services.py\n@@ -12,7 +12,7 @@\n \n def save_image(storage, image, path, format):\n buffer = io.BytesIO()\n- image.convert(\"RGB\").save(fp=buffer, format=format)\n+ image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n", "issue": "Thumbnailing of transparent images seems to break\n### Describe the bug\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### How to reproduce\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Expected behaviour\r\nNot <img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Screenshots\r\n<img width=\"1119\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/191974542-041bdb37-f2e0-4181-9267-9a24d5df66b3.png\">\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import get_storage_class, DefaultStorage\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: True if the file is a forced download\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name = file\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. 
Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = \"{}_{}\".format(size, int(fit))\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}], "after_files": [{"content": "import io\nimport os\n\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import get_storage_class, DefaultStorage\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.db.models.fields.files import FieldFile, ImageFieldFile\nfrom django.urls import reverse\n\n\ndef save_image(storage, image, path, format):\n buffer = io.BytesIO()\n image.convert(\"RGB\" if format == \"JPEG\" else \"RGBA\").save(fp=buffer, format=format)\n buff_val = buffer.getvalue()\n content = ContentFile(buff_val)\n file = InMemoryUploadedFile(\n content,\n None,\n f\"foo.{format.lower()}\",\n f\"image/{format.lower()}\",\n content.tell,\n None,\n )\n return storage.save(path, file)\n\n\ndef get_media_url(file, attachment=False):\n \"\"\"Get the url of the provided media file to serve in a browser.\n\n If the file is private a signature will be added.\n Do NOT use this with user input\n :param file: the file field\n :param attachment: True if the file is a forced download\n :return: the url of the media\n \"\"\"\n storage = DefaultStorage()\n file_name = file\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n file_name = file.name\n\n return f\"{storage.url(file_name, attachment)}\"\n\n\ndef get_thumbnail_url(file, size, fit=True):\n \"\"\"Get the thumbnail url of a media file, NEVER use this with user input.\n\n If the thumbnail exists this function will return the url of the\n media file, with signature if necessary. 
Does it not yet exist a route\n that executes the :func:`utils.media.views.generate_thumbnail`\n will be the output.\n :param file: the file field\n :param size: size of the image\n :param fit: False to keep the aspect ratio, True to crop\n :return: get-thumbnail path\n \"\"\"\n storage = DefaultStorage()\n name = file\n\n if isinstance(file, ImageFieldFile) or isinstance(file, FieldFile):\n storage = file.storage\n name = file.name\n\n is_public = isinstance(storage, get_storage_class(settings.PUBLIC_FILE_STORAGE))\n size_fit = \"{}_{}\".format(size, int(fit))\n\n if name.endswith(\".svg\") and is_public:\n return storage.url(name)\n\n sig_info = {\n \"size\": size,\n \"fit\": int(fit),\n \"name\": name,\n \"thumb_path\": f\"thumbnails/{size_fit}/{name}\",\n \"serve_path\": f\"thumbnails/{size_fit}/{name}\",\n \"storage\": f\"{storage.__class__.__module__}.{storage.__class__.__name__}\",\n }\n\n # We provide a URL instead of calling it as a function, so that using\n # it means kicking off a new GET request. If we would need to check all files for the\n # thumbnails inline, loading an album overview would have high latency.\n return (\n reverse(\"get-thumbnail\", args=[os.path.join(size_fit, sig_info[\"name\"])])\n + f\"?sig={signing.dumps(sig_info)}\"\n )\n", "path": "website/utils/media/services.py"}]} | 1,445 | 130 |
gh_patches_debug_30000 | rasdani/github-patches | git_diff | Textualize__rich-1950 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
console markup escaping issue
### Discussed in https://github.com/Textualize/rich/discussions/1939
<div type='discussions-op-text'>
<sup>Originally posted by **codenotworking** February 9, 2022</sup>
My code:
```python
from rich import print as rprint
rprint("[dim white]\[url=[/]")
```
I'm trying to print simply `[url=` but I end up getting `[url=[/]`. What am I doing wrong?</div>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rich/markup.py`
Content:
```
1 from ast import literal_eval
2 from operator import attrgetter
3 import re
4 from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
5
6 from .errors import MarkupError
7 from .style import Style
8 from .text import Span, Text
9 from .emoji import EmojiVariant
10 from ._emoji_replace import _emoji_replace
11
12
13 RE_TAGS = re.compile(
14 r"""((\\*)\[([a-z#\/@].*?)\])""",
15 re.VERBOSE,
16 )
17
18 RE_HANDLER = re.compile(r"^([\w\.]*?)(\(.*?\))?$")
19
20
21 class Tag(NamedTuple):
22 """A tag in console markup."""
23
24 name: str
25 """The tag name. e.g. 'bold'."""
26 parameters: Optional[str]
27 """Any additional parameters after the name."""
28
29 def __str__(self) -> str:
30 return (
31 self.name if self.parameters is None else f"{self.name} {self.parameters}"
32 )
33
34 @property
35 def markup(self) -> str:
36 """Get the string representation of this tag."""
37 return (
38 f"[{self.name}]"
39 if self.parameters is None
40 else f"[{self.name}={self.parameters}]"
41 )
42
43
44 _ReStringMatch = Match[str] # regex match object
45 _ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
46 _EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
47
48
49 def escape(
50 markup: str, _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#\/@].*?\])").sub
51 ) -> str:
52 """Escapes text so that it won't be interpreted as markup.
53
54 Args:
55 markup (str): Content to be inserted in to markup.
56
57 Returns:
58 str: Markup with square brackets escaped.
59 """
60
61 def escape_backslashes(match: Match[str]) -> str:
62 """Called by re.sub replace matches."""
63 backslashes, text = match.groups()
64 return f"{backslashes}{backslashes}\\{text}"
65
66 markup = _escape(escape_backslashes, markup)
67 return markup
68
69
70 def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
71 """Parse markup in to an iterable of tuples of (position, text, tag).
72
73 Args:
74 markup (str): A string containing console markup
75
76 """
77 position = 0
78 _divmod = divmod
79 _Tag = Tag
80 for match in RE_TAGS.finditer(markup):
81 full_text, escapes, tag_text = match.groups()
82 start, end = match.span()
83 if start > position:
84 yield start, markup[position:start], None
85 if escapes:
86 backslashes, escaped = _divmod(len(escapes), 2)
87 if backslashes:
88 # Literal backslashes
89 yield start, "\\" * backslashes, None
90 start += backslashes * 2
91 if escaped:
92 # Escape of tag
93 yield start, full_text[len(escapes) :], None
94 position = end
95 continue
96 text, equals, parameters = tag_text.partition("=")
97 yield start, None, _Tag(text, parameters if equals else None)
98 position = end
99 if position < len(markup):
100 yield position, markup[position:], None
101
102
103 def render(
104 markup: str,
105 style: Union[str, Style] = "",
106 emoji: bool = True,
107 emoji_variant: Optional[EmojiVariant] = None,
108 ) -> Text:
109 """Render console markup in to a Text instance.
110
111 Args:
112 markup (str): A string containing console markup.
113 emoji (bool, optional): Also render emoji code. Defaults to True.
114
115 Raises:
116 MarkupError: If there is a syntax error in the markup.
117
118 Returns:
119 Text: A test instance.
120 """
121 emoji_replace = _emoji_replace
122 if "[" not in markup:
123 return Text(
124 emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
125 style=style,
126 )
127 text = Text(style=style)
128 append = text.append
129 normalize = Style.normalize
130
131 style_stack: List[Tuple[int, Tag]] = []
132 pop = style_stack.pop
133
134 spans: List[Span] = []
135 append_span = spans.append
136
137 _Span = Span
138 _Tag = Tag
139
140 def pop_style(style_name: str) -> Tuple[int, Tag]:
141 """Pop tag matching given style name."""
142 for index, (_, tag) in enumerate(reversed(style_stack), 1):
143 if tag.name == style_name:
144 return pop(-index)
145 raise KeyError(style_name)
146
147 for position, plain_text, tag in _parse(markup):
148 if plain_text is not None:
149 append(emoji_replace(plain_text) if emoji else plain_text)
150 elif tag is not None:
151 if tag.name.startswith("/"): # Closing tag
152 style_name = tag.name[1:].strip()
153
154 if style_name: # explicit close
155 style_name = normalize(style_name)
156 try:
157 start, open_tag = pop_style(style_name)
158 except KeyError:
159 raise MarkupError(
160 f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
161 ) from None
162 else: # implicit close
163 try:
164 start, open_tag = pop()
165 except IndexError:
166 raise MarkupError(
167 f"closing tag '[/]' at position {position} has nothing to close"
168 ) from None
169
170 if open_tag.name.startswith("@"):
171 if open_tag.parameters:
172 handler_name = ""
173 parameters = open_tag.parameters.strip()
174 handler_match = RE_HANDLER.match(parameters)
175 if handler_match is not None:
176 handler_name, match_parameters = handler_match.groups()
177 parameters = (
178 "()" if match_parameters is None else match_parameters
179 )
180
181 try:
182 meta_params = literal_eval(parameters)
183 except SyntaxError as error:
184 raise MarkupError(
185 f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
186 )
187 except Exception as error:
188 raise MarkupError(
189 f"error parsing {open_tag.parameters!r}; {error}"
190 ) from None
191
192 if handler_name:
193 meta_params = (
194 handler_name,
195 meta_params
196 if isinstance(meta_params, tuple)
197 else (meta_params,),
198 )
199
200 else:
201 meta_params = ()
202
203 append_span(
204 _Span(
205 start, len(text), Style(meta={open_tag.name: meta_params})
206 )
207 )
208 else:
209 append_span(_Span(start, len(text), str(open_tag)))
210
211 else: # Opening tag
212 normalized_tag = _Tag(normalize(tag.name), tag.parameters)
213 style_stack.append((len(text), normalized_tag))
214
215 text_length = len(text)
216 while style_stack:
217 start, tag = style_stack.pop()
218 style = str(tag)
219 if style:
220 append_span(_Span(start, text_length, style))
221
222 text.spans = sorted(spans[::-1], key=attrgetter("start"))
223 return text
224
225
226 if __name__ == "__main__": # pragma: no cover
227
228 MARKUP = [
229 "[red]Hello World[/red]",
230 "[magenta]Hello [b]World[/b]",
231 "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
232 "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
233 ":warning-emoji: [bold red blink] DANGER![/]",
234 ]
235
236 from rich.table import Table
237 from rich import print
238
239 grid = Table("Markup", "Result", padding=(0, 1))
240
241 for markup in MARKUP:
242 grid.add_row(Text(markup), markup)
243
244 print(grid)
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rich/markup.py b/rich/markup.py
--- a/rich/markup.py
+++ b/rich/markup.py
@@ -1,21 +1,20 @@
+import re
from ast import literal_eval
from operator import attrgetter
-import re
from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
+from ._emoji_replace import _emoji_replace
+from .emoji import EmojiVariant
from .errors import MarkupError
from .style import Style
from .text import Span, Text
-from .emoji import EmojiVariant
-from ._emoji_replace import _emoji_replace
-
RE_TAGS = re.compile(
- r"""((\\*)\[([a-z#\/@].*?)\])""",
+ r"""((\\*)\[([a-z#/@][^[]*?)])""",
re.VERBOSE,
)
-RE_HANDLER = re.compile(r"^([\w\.]*?)(\(.*?\))?$")
+RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
class Tag(NamedTuple):
@@ -146,6 +145,8 @@
for position, plain_text, tag in _parse(markup):
if plain_text is not None:
+ # Handle open brace escapes, where the brace is not part of a tag.
+ plain_text = plain_text.replace("\\[", "[")
append(emoji_replace(plain_text) if emoji else plain_text)
elif tag is not None:
if tag.name.startswith("/"): # Closing tag
@@ -233,8 +234,8 @@
":warning-emoji: [bold red blink] DANGER![/]",
]
- from rich.table import Table
from rich import print
+ from rich.table import Table
grid = Table("Markup", "Result", padding=(0, 1))
| {"golden_diff": "diff --git a/rich/markup.py b/rich/markup.py\n--- a/rich/markup.py\n+++ b/rich/markup.py\n@@ -1,21 +1,20 @@\n+import re\n from ast import literal_eval\n from operator import attrgetter\n-import re\n from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union\n \n+from ._emoji_replace import _emoji_replace\n+from .emoji import EmojiVariant\n from .errors import MarkupError\n from .style import Style\n from .text import Span, Text\n-from .emoji import EmojiVariant\n-from ._emoji_replace import _emoji_replace\n-\n \n RE_TAGS = re.compile(\n- r\"\"\"((\\\\*)\\[([a-z#\\/@].*?)\\])\"\"\",\n+ r\"\"\"((\\\\*)\\[([a-z#/@][^[]*?)])\"\"\",\n re.VERBOSE,\n )\n \n-RE_HANDLER = re.compile(r\"^([\\w\\.]*?)(\\(.*?\\))?$\")\n+RE_HANDLER = re.compile(r\"^([\\w.]*?)(\\(.*?\\))?$\")\n \n \n class Tag(NamedTuple):\n@@ -146,6 +145,8 @@\n \n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n+ # Handle open brace escapes, where the brace is not part of a tag.\n+ plain_text = plain_text.replace(\"\\\\[\", \"[\")\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n@@ -233,8 +234,8 @@\n \":warning-emoji: [bold red blink] DANGER![/]\",\n ]\n \n- from rich.table import Table\n from rich import print\n+ from rich.table import Table\n \n grid = Table(\"Markup\", \"Result\", padding=(0, 1))\n", "issue": "console markup escaping issue\n### Discussed in https://github.com/Textualize/rich/discussions/1939\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **codenotworking** February 9, 2022</sup>\r\nMy code:\r\n```python\r\nfrom rich import print as rprint\r\n\r\nrprint(\"[dim white]\\[url=[/]\")\r\n```\r\n\r\nI'm trying to print simply `[url=` but I end up getting `[url=[/]`. What am I doing wrong?</div>\n", "before_files": [{"content": "from ast import literal_eval\nfrom operator import attrgetter\nimport re\nfrom typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union\n\nfrom .errors import MarkupError\nfrom .style import Style\nfrom .text import Span, Text\nfrom .emoji import EmojiVariant\nfrom ._emoji_replace import _emoji_replace\n\n\nRE_TAGS = re.compile(\n r\"\"\"((\\\\*)\\[([a-z#\\/@].*?)\\])\"\"\",\n re.VERBOSE,\n)\n\nRE_HANDLER = re.compile(r\"^([\\w\\.]*?)(\\(.*?\\))?$\")\n\n\nclass Tag(NamedTuple):\n \"\"\"A tag in console markup.\"\"\"\n\n name: str\n \"\"\"The tag name. e.g. 
'bold'.\"\"\"\n parameters: Optional[str]\n \"\"\"Any additional parameters after the name.\"\"\"\n\n def __str__(self) -> str:\n return (\n self.name if self.parameters is None else f\"{self.name} {self.parameters}\"\n )\n\n @property\n def markup(self) -> str:\n \"\"\"Get the string representation of this tag.\"\"\"\n return (\n f\"[{self.name}]\"\n if self.parameters is None\n else f\"[{self.name}={self.parameters}]\"\n )\n\n\n_ReStringMatch = Match[str] # regex match object\n_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub\n_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re\n\n\ndef escape(\n markup: str, _escape: _EscapeSubMethod = re.compile(r\"(\\\\*)(\\[[a-z#\\/@].*?\\])\").sub\n) -> str:\n \"\"\"Escapes text so that it won't be interpreted as markup.\n\n Args:\n markup (str): Content to be inserted in to markup.\n\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n\n def escape_backslashes(match: Match[str]) -> str:\n \"\"\"Called by re.sub replace matches.\"\"\"\n backslashes, text = match.groups()\n return f\"{backslashes}{backslashes}\\\\{text}\"\n\n markup = _escape(escape_backslashes, markup)\n return markup\n\n\ndef _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n \"\"\"Parse markup in to an iterable of tuples of (position, text, tag).\n\n Args:\n markup (str): A string containing console markup\n\n \"\"\"\n position = 0\n _divmod = divmod\n _Tag = Tag\n for match in RE_TAGS.finditer(markup):\n full_text, escapes, tag_text = match.groups()\n start, end = match.span()\n if start > position:\n yield start, markup[position:start], None\n if escapes:\n backslashes, escaped = _divmod(len(escapes), 2)\n if backslashes:\n # Literal backslashes\n yield start, \"\\\\\" * backslashes, None\n start += backslashes * 2\n if escaped:\n # Escape of tag\n yield start, full_text[len(escapes) :], None\n position = end\n continue\n text, equals, parameters = tag_text.partition(\"=\")\n yield start, None, _Tag(text, parameters if equals else None)\n position = end\n if position < len(markup):\n yield position, markup[position:], None\n\n\ndef render(\n markup: str,\n style: Union[str, Style] = \"\",\n emoji: bool = True,\n emoji_variant: Optional[EmojiVariant] = None,\n) -> Text:\n \"\"\"Render console markup in to a Text instance.\n\n Args:\n markup (str): A string containing console markup.\n emoji (bool, optional): Also render emoji code. 
Defaults to True.\n\n Raises:\n MarkupError: If there is a syntax error in the markup.\n\n Returns:\n Text: A test instance.\n \"\"\"\n emoji_replace = _emoji_replace\n if \"[\" not in markup:\n return Text(\n emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,\n style=style,\n )\n text = Text(style=style)\n append = text.append\n normalize = Style.normalize\n\n style_stack: List[Tuple[int, Tag]] = []\n pop = style_stack.pop\n\n spans: List[Span] = []\n append_span = spans.append\n\n _Span = Span\n _Tag = Tag\n\n def pop_style(style_name: str) -> Tuple[int, Tag]:\n \"\"\"Pop tag matching given style name.\"\"\"\n for index, (_, tag) in enumerate(reversed(style_stack), 1):\n if tag.name == style_name:\n return pop(-index)\n raise KeyError(style_name)\n\n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n style_name = tag.name[1:].strip()\n\n if style_name: # explicit close\n style_name = normalize(style_name)\n try:\n start, open_tag = pop_style(style_name)\n except KeyError:\n raise MarkupError(\n f\"closing tag '{tag.markup}' at position {position} doesn't match any open tag\"\n ) from None\n else: # implicit close\n try:\n start, open_tag = pop()\n except IndexError:\n raise MarkupError(\n f\"closing tag '[/]' at position {position} has nothing to close\"\n ) from None\n\n if open_tag.name.startswith(\"@\"):\n if open_tag.parameters:\n handler_name = \"\"\n parameters = open_tag.parameters.strip()\n handler_match = RE_HANDLER.match(parameters)\n if handler_match is not None:\n handler_name, match_parameters = handler_match.groups()\n parameters = (\n \"()\" if match_parameters is None else match_parameters\n )\n\n try:\n meta_params = literal_eval(parameters)\n except SyntaxError as error:\n raise MarkupError(\n f\"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}\"\n )\n except Exception as error:\n raise MarkupError(\n f\"error parsing {open_tag.parameters!r}; {error}\"\n ) from None\n\n if handler_name:\n meta_params = (\n handler_name,\n meta_params\n if isinstance(meta_params, tuple)\n else (meta_params,),\n )\n\n else:\n meta_params = ()\n\n append_span(\n _Span(\n start, len(text), Style(meta={open_tag.name: meta_params})\n )\n )\n else:\n append_span(_Span(start, len(text), str(open_tag)))\n\n else: # Opening tag\n normalized_tag = _Tag(normalize(tag.name), tag.parameters)\n style_stack.append((len(text), normalized_tag))\n\n text_length = len(text)\n while style_stack:\n start, tag = style_stack.pop()\n style = str(tag)\n if style:\n append_span(_Span(start, text_length, style))\n\n text.spans = sorted(spans[::-1], key=attrgetter(\"start\"))\n return text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n MARKUP = [\n \"[red]Hello World[/red]\",\n \"[magenta]Hello [b]World[/b]\",\n \"[bold]Bold[italic] bold and italic [/bold]italic[/italic]\",\n \"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog\",\n \":warning-emoji: [bold red blink] DANGER![/]\",\n ]\n\n from rich.table import Table\n from rich import print\n\n grid = Table(\"Markup\", \"Result\", padding=(0, 1))\n\n for markup in MARKUP:\n grid.add_row(Text(markup), markup)\n\n print(grid)\n", "path": "rich/markup.py"}], "after_files": [{"content": "import re\nfrom ast import literal_eval\nfrom operator import attrgetter\nfrom typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, 
Union\n\nfrom ._emoji_replace import _emoji_replace\nfrom .emoji import EmojiVariant\nfrom .errors import MarkupError\nfrom .style import Style\nfrom .text import Span, Text\n\nRE_TAGS = re.compile(\n r\"\"\"((\\\\*)\\[([a-z#/@][^[]*?)])\"\"\",\n re.VERBOSE,\n)\n\nRE_HANDLER = re.compile(r\"^([\\w.]*?)(\\(.*?\\))?$\")\n\n\nclass Tag(NamedTuple):\n \"\"\"A tag in console markup.\"\"\"\n\n name: str\n \"\"\"The tag name. e.g. 'bold'.\"\"\"\n parameters: Optional[str]\n \"\"\"Any additional parameters after the name.\"\"\"\n\n def __str__(self) -> str:\n return (\n self.name if self.parameters is None else f\"{self.name} {self.parameters}\"\n )\n\n @property\n def markup(self) -> str:\n \"\"\"Get the string representation of this tag.\"\"\"\n return (\n f\"[{self.name}]\"\n if self.parameters is None\n else f\"[{self.name}={self.parameters}]\"\n )\n\n\n_ReStringMatch = Match[str] # regex match object\n_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub\n_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re\n\n\ndef escape(\n markup: str, _escape: _EscapeSubMethod = re.compile(r\"(\\\\*)(\\[[a-z#\\/@].*?\\])\").sub\n) -> str:\n \"\"\"Escapes text so that it won't be interpreted as markup.\n\n Args:\n markup (str): Content to be inserted in to markup.\n\n Returns:\n str: Markup with square brackets escaped.\n \"\"\"\n\n def escape_backslashes(match: Match[str]) -> str:\n \"\"\"Called by re.sub replace matches.\"\"\"\n backslashes, text = match.groups()\n return f\"{backslashes}{backslashes}\\\\{text}\"\n\n markup = _escape(escape_backslashes, markup)\n return markup\n\n\ndef _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:\n \"\"\"Parse markup in to an iterable of tuples of (position, text, tag).\n\n Args:\n markup (str): A string containing console markup\n\n \"\"\"\n position = 0\n _divmod = divmod\n _Tag = Tag\n for match in RE_TAGS.finditer(markup):\n full_text, escapes, tag_text = match.groups()\n start, end = match.span()\n if start > position:\n yield start, markup[position:start], None\n if escapes:\n backslashes, escaped = _divmod(len(escapes), 2)\n if backslashes:\n # Literal backslashes\n yield start, \"\\\\\" * backslashes, None\n start += backslashes * 2\n if escaped:\n # Escape of tag\n yield start, full_text[len(escapes) :], None\n position = end\n continue\n text, equals, parameters = tag_text.partition(\"=\")\n yield start, None, _Tag(text, parameters if equals else None)\n position = end\n if position < len(markup):\n yield position, markup[position:], None\n\n\ndef render(\n markup: str,\n style: Union[str, Style] = \"\",\n emoji: bool = True,\n emoji_variant: Optional[EmojiVariant] = None,\n) -> Text:\n \"\"\"Render console markup in to a Text instance.\n\n Args:\n markup (str): A string containing console markup.\n emoji (bool, optional): Also render emoji code. 
Defaults to True.\n\n Raises:\n MarkupError: If there is a syntax error in the markup.\n\n Returns:\n Text: A test instance.\n \"\"\"\n emoji_replace = _emoji_replace\n if \"[\" not in markup:\n return Text(\n emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,\n style=style,\n )\n text = Text(style=style)\n append = text.append\n normalize = Style.normalize\n\n style_stack: List[Tuple[int, Tag]] = []\n pop = style_stack.pop\n\n spans: List[Span] = []\n append_span = spans.append\n\n _Span = Span\n _Tag = Tag\n\n def pop_style(style_name: str) -> Tuple[int, Tag]:\n \"\"\"Pop tag matching given style name.\"\"\"\n for index, (_, tag) in enumerate(reversed(style_stack), 1):\n if tag.name == style_name:\n return pop(-index)\n raise KeyError(style_name)\n\n for position, plain_text, tag in _parse(markup):\n if plain_text is not None:\n # Handle open brace escapes, where the brace is not part of a tag.\n plain_text = plain_text.replace(\"\\\\[\", \"[\")\n append(emoji_replace(plain_text) if emoji else plain_text)\n elif tag is not None:\n if tag.name.startswith(\"/\"): # Closing tag\n style_name = tag.name[1:].strip()\n\n if style_name: # explicit close\n style_name = normalize(style_name)\n try:\n start, open_tag = pop_style(style_name)\n except KeyError:\n raise MarkupError(\n f\"closing tag '{tag.markup}' at position {position} doesn't match any open tag\"\n ) from None\n else: # implicit close\n try:\n start, open_tag = pop()\n except IndexError:\n raise MarkupError(\n f\"closing tag '[/]' at position {position} has nothing to close\"\n ) from None\n\n if open_tag.name.startswith(\"@\"):\n if open_tag.parameters:\n handler_name = \"\"\n parameters = open_tag.parameters.strip()\n handler_match = RE_HANDLER.match(parameters)\n if handler_match is not None:\n handler_name, match_parameters = handler_match.groups()\n parameters = (\n \"()\" if match_parameters is None else match_parameters\n )\n\n try:\n meta_params = literal_eval(parameters)\n except SyntaxError as error:\n raise MarkupError(\n f\"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}\"\n )\n except Exception as error:\n raise MarkupError(\n f\"error parsing {open_tag.parameters!r}; {error}\"\n ) from None\n\n if handler_name:\n meta_params = (\n handler_name,\n meta_params\n if isinstance(meta_params, tuple)\n else (meta_params,),\n )\n\n else:\n meta_params = ()\n\n append_span(\n _Span(\n start, len(text), Style(meta={open_tag.name: meta_params})\n )\n )\n else:\n append_span(_Span(start, len(text), str(open_tag)))\n\n else: # Opening tag\n normalized_tag = _Tag(normalize(tag.name), tag.parameters)\n style_stack.append((len(text), normalized_tag))\n\n text_length = len(text)\n while style_stack:\n start, tag = style_stack.pop()\n style = str(tag)\n if style:\n append_span(_Span(start, text_length, style))\n\n text.spans = sorted(spans[::-1], key=attrgetter(\"start\"))\n return text\n\n\nif __name__ == \"__main__\": # pragma: no cover\n\n MARKUP = [\n \"[red]Hello World[/red]\",\n \"[magenta]Hello [b]World[/b]\",\n \"[bold]Bold[italic] bold and italic [/bold]italic[/italic]\",\n \"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog\",\n \":warning-emoji: [bold red blink] DANGER![/]\",\n ]\n\n from rich import print\n from rich.table import Table\n\n grid = Table(\"Markup\", \"Result\", padding=(0, 1))\n\n for markup in MARKUP:\n grid.add_row(Text(markup), markup)\n\n print(grid)\n", "path": "rich/markup.py"}]} | 2,728 | 414 |
gh_patches_debug_26436 | rasdani/github-patches | git_diff | Textualize__textual-1066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Header's title text isn't centered properly
> Please give a brief but clear explanation of what the issue is. Let us know what the behaviour you expect is, and what is actually happening. Let us know what operating system you are running on, and what terminal you are using.
`Header`'s title text isn't centered, `show_clock=True` exacerbates the issue. My expectation is that the title is centered within the visible space between the icon/clock (if shown), and between the icon/right edge if not.
> Feel free to add screenshots and/or videos. These can be very helpful!



> If you can, include a complete working example that demonstrates the bug. Please check it can run without modifications.
```python
from textual.app import App, ComposeResult
from textual.widgets import Header, Static
class Demo(App):
TITLE = "Demonstration"
CSS = """
Screen {
layout: grid;
grid-size: 2;
}
.box {
height: 100%;
border: white;
}
"""
def compose(self) -> ComposeResult:
yield Header(show_clock=True)
yield Static(classes="box")
yield Static(classes="box")
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_header.py`
Content:
```
1 from __future__ import annotations
2
3 from datetime import datetime
4
5 from rich.text import Text
6
7 from ..widget import Widget
8 from ..reactive import Reactive, watch
9
10
11 class HeaderIcon(Widget):
12 """Display an 'icon' on the left of the header."""
13
14 DEFAULT_CSS = """
15 HeaderIcon {
16 dock: left;
17 padding: 0 1;
18 width: 8;
19 content-align: left middle;
20 }
21 """
22 icon = Reactive("⭘")
23
24 def render(self):
25 return self.icon
26
27
28 class HeaderClock(Widget):
29 """Display a clock on the right of the header."""
30
31 DEFAULT_CSS = """
32 HeaderClock {
33 dock: right;
34 width: 10;
35 padding: 0 1;
36 background: $secondary-background-lighten-1;
37 color: $text;
38 text-opacity: 85%;
39 content-align: center middle;
40 }
41 """
42
43 def on_mount(self) -> None:
44 self.set_interval(1, callback=self.refresh, name=f"update header clock")
45
46 def render(self):
47 return Text(datetime.now().time().strftime("%X"))
48
49
50 class HeaderTitle(Widget):
51 """Display the title / subtitle in the header."""
52
53 DEFAULT_CSS = """
54 HeaderTitle {
55 content-align: center middle;
56 width: 100%;
57 margin-right: 10;
58 }
59 """
60
61 text: Reactive[str] = Reactive("")
62 sub_text = Reactive("")
63
64 def render(self) -> Text:
65 text = Text(self.text, no_wrap=True, overflow="ellipsis")
66 if self.sub_text:
67 text.append(" — ")
68 text.append(self.sub_text, "dim")
69 return text
70
71
72 class Header(Widget):
73 """A header widget with icon and clock.
74
75 Args:
76 show_clock (bool, optional): True if the clock should be shown on the right of the header.
77 """
78
79 DEFAULT_CSS = """
80 Header {
81 dock: top;
82 width: 100%;
83 background: $secondary-background;
84 color: $text;
85 height: 1;
86 }
87 Header.-tall {
88 height: 3;
89 }
90 """
91
92 tall = Reactive(False)
93
94 DEFAULT_CLASSES = ""
95
96 def __init__(
97 self,
98 show_clock: bool = False,
99 *,
100 name: str | None = None,
101 id: str | None = None,
102 classes: str | None = None,
103 ):
104 super().__init__(name=name, id=id, classes=classes)
105 self.show_clock = show_clock
106
107 def compose(self):
108 yield HeaderIcon()
109 yield HeaderTitle()
110 if self.show_clock:
111 yield HeaderClock()
112
113 def watch_tall(self, tall: bool) -> None:
114 self.set_class(tall, "-tall")
115
116 def on_click(self):
117 self.toggle_class("-tall")
118
119 def on_mount(self) -> None:
120 def set_title(title: str) -> None:
121 self.query_one(HeaderTitle).text = title
122
123 def set_sub_title(sub_title: str) -> None:
124 self.query_one(HeaderTitle).sub_text = sub_title
125
126 watch(self.app, "title", set_title)
127 watch(self.app, "sub_title", set_sub_title)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py
--- a/src/textual/widgets/_header.py
+++ b/src/textual/widgets/_header.py
@@ -25,14 +25,26 @@
return self.icon
-class HeaderClock(Widget):
- """Display a clock on the right of the header."""
+class HeaderClockSpace(Widget):
+ """The space taken up by the clock on the right of the header."""
DEFAULT_CSS = """
- HeaderClock {
+ HeaderClockSpace {
dock: right;
width: 10;
padding: 0 1;
+ }
+ """
+
+ def render(self) -> str:
+ return ""
+
+
+class HeaderClock(HeaderClockSpace):
+ """Display a clock on the right of the header."""
+
+ DEFAULT_CSS = """
+ HeaderClock {
background: $secondary-background-lighten-1;
color: $text;
text-opacity: 85%;
@@ -54,7 +66,6 @@
HeaderTitle {
content-align: center middle;
width: 100%;
- margin-right: 10;
}
"""
@@ -107,8 +118,7 @@
def compose(self):
yield HeaderIcon()
yield HeaderTitle()
- if self.show_clock:
- yield HeaderClock()
+ yield HeaderClock() if self.show_clock else HeaderClockSpace()
def watch_tall(self, tall: bool) -> None:
self.set_class(tall, "-tall")
| {"golden_diff": "diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py\n--- a/src/textual/widgets/_header.py\n+++ b/src/textual/widgets/_header.py\n@@ -25,14 +25,26 @@\n return self.icon\n \n \n-class HeaderClock(Widget):\n- \"\"\"Display a clock on the right of the header.\"\"\"\n+class HeaderClockSpace(Widget):\n+ \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n- HeaderClock {\n+ HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n+ }\n+ \"\"\"\n+\n+ def render(self) -> str:\n+ return \"\"\n+\n+\n+class HeaderClock(HeaderClockSpace):\n+ \"\"\"Display a clock on the right of the header.\"\"\"\n+\n+ DEFAULT_CSS = \"\"\"\n+ HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n@@ -54,7 +66,6 @@\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n- margin-right: 10;\n }\n \"\"\"\n \n@@ -107,8 +118,7 @@\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n- if self.show_clock:\n- yield HeaderClock()\n+ yield HeaderClock() if self.show_clock else HeaderClockSpace()\n \n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n", "issue": "[BUG] Header's title text isn't centered properly \n> Please give a brief but clear explanation of what the issue is. Let us know what the behaviour you expect is, and what is actually happening. Let us know what operating system you are running on, and what terminal you are using.\r\n\r\n`Header`'s title text isn't centered, `show_clock=True` exacerbates the issue. My expectation is that the title is centered within the visible space between the icon/clock (if shown), and between the icon/right edge if not.\r\n\r\n> Feel free to add screenshots and/or videos. These can be very helpful!\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n> If you can, include a complete working example that demonstrates the bug. 
Please check it can run without modifications.\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Header, Static\r\n\r\nclass Demo(App):\r\n TITLE = \"Demonstration\"\r\n CSS = \"\"\"\r\n Screen {\r\n layout: grid;\r\n grid-size: 2;\r\n }\r\n .box {\r\n height: 100%;\r\n border: white;\r\n }\r\n \"\"\"\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Header(show_clock=True)\r\n yield Static(classes=\"box\")\r\n yield Static(classes=\"box\")\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClock(Widget):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n dock: right;\n width: 10;\n padding: 0 1;\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n margin-right: 10;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $secondary-background;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n }\n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n if self.show_clock:\n yield HeaderClock()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n 
width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClockSpace(Widget):\n \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n }\n \"\"\"\n\n def render(self) -> str:\n return \"\"\n\n\nclass HeaderClock(HeaderClockSpace):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $secondary-background;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n }\n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n yield HeaderClock() if self.show_clock else HeaderClockSpace()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}]} | 1,693 | 350 |
gh_patches_debug_38859 | rasdani/github-patches | git_diff | ESMCI__cime-1857 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SystemTestsCompareTwo multisubmit tries to do too much in phase 1
In comparing #1830 with what made it to master, I noticed that the indentation of this block is wrong:
```python
# Compare results
# Case1 is the "main" case, and we need to do the comparisons from there
self._activate_case1()
self._link_to_case2_output()
self._component_compare_test(self._run_one_suffix, self._run_two_suffix, success_change=success_change)
```
-- this should be indented under the "Second run" conditional.
The current indentation leads the ERR test (and any other multi-submit test) to try to do component_compare_test after the first phase, leading to a FAIL result. This doesn't cause a test failure, because the FAIL is later overwritten with a PASS, but it is still incorrect.
I have a fix for this in an incoming PR.
--- END ISSUE ---
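For orientation, here is a minimal, self-contained sketch of the control flow the issue is asking for — the comparison only happens in the second phase. The method and attribute names are taken from the issue text; the stub class and its print-based helpers are placeholders, not CIME's actual `SystemTestsCompareTwo` implementation:

```python
# Sketch only: names come from the issue text; the stub class and its
# helpers are assumptions, not the real SystemTestsCompareTwo code.
class TwoPhaseSketch:
    def __init__(self):
        self._run_one_suffix = "base"
        self._run_two_suffix = "rest"

    def _activate_case1(self):
        print("activate case1")

    def _link_to_case2_output(self):
        print("link case2 output into case1")

    def _component_compare_test(self, one, two, success_change=False):
        print(f"compare '{one}' vs '{two}' (success_change={success_change})")

    def run(self, first_phase: bool, success_change: bool = False) -> None:
        if first_phase:
            # First submission: run case one only; nothing to compare yet.
            self._activate_case1()
        else:
            # Second submission: only now link case2 output and compare,
            # i.e. the block the issue says must sit under the
            # "Second run" conditional.
            self._activate_case1()
            self._link_to_case2_output()
            self._component_compare_test(
                self._run_one_suffix,
                self._run_two_suffix,
                success_change=success_change,
            )


TwoPhaseSketch().run(first_phase=True)
TwoPhaseSketch().run(first_phase=False)
```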
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/SystemTests/erp.py`
Content:
```
1 """
2 CIME ERP test. This class inherits from SystemTestsCompareTwo
3
4 This is a pes counts hybrid (open-MP/MPI) restart bfb test from
5 startup. This is just like an ERS test but the pe-counts/threading
6 count are modified on retart.
7 (1) Do an initial run with pes set up out of the box (suffix base)
8 (2) Do a restart test with half the number of tasks and threads (suffix rest)
9 """
10
11 from CIME.XML.standard_module_setup import *
12 from CIME.case_setup import case_setup
13 from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
14 from CIME.check_lockedfiles import *
15
16 logger = logging.getLogger(__name__)
17
18 class ERP(SystemTestsCompareTwo):
19
20 def __init__(self, case):
21 """
22 initialize a test object
23 """
24 SystemTestsCompareTwo.__init__(self, case,
25 separate_builds = True,
26 run_two_suffix = 'rest',
27 run_one_description = 'initial',
28 run_two_description = 'restart')
29
30 def _common_setup(self):
31 self._case.set_value("BUILD_THREADED",True)
32
33 def _case_one_setup(self):
34 stop_n = self._case.get_value("STOP_N")
35
36 expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n))
37
38 def _case_two_setup(self):
39 # halve the number of tasks and threads
40 for comp in self._case.get_values("COMP_CLASSES"):
41 ntasks = self._case1.get_value("NTASKS_{}".format(comp))
42 nthreads = self._case1.get_value("NTHRDS_{}".format(comp))
43 rootpe = self._case1.get_value("ROOTPE_{}".format(comp))
44 if ( nthreads > 1 ):
45 self._case.set_value("NTHRDS_{}".format(comp), nthreads/2)
46 if ( ntasks > 1 ):
47 self._case.set_value("NTASKS_{}".format(comp), ntasks/2)
48 self._case.set_value("ROOTPE_{}".format(comp), rootpe/2)
49
50 stop_n = self._case1.get_value("STOP_N")
51 rest_n = self._case1.get_value("REST_N")
52 stop_new = stop_n - rest_n
53 expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n))
54 self._case.set_value("STOP_N", stop_new)
55 self._case.set_value("HIST_N", stop_n)
56 self._case.set_value("CONTINUE_RUN", True)
57 self._case.set_value("REST_OPTION","never")
58
59 # Note, some components, like CESM-CICE, have
60 # decomposition information in env_build.xml that
61 # needs to be regenerated for the above new tasks and thread counts
62 case_setup(self._case, test_mode=True, reset=True)
63
64 def _case_one_custom_postrun_action(self):
65 self.copy_case1_restarts_to_case2()
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py
--- a/scripts/lib/CIME/SystemTests/erp.py
+++ b/scripts/lib/CIME/SystemTests/erp.py
@@ -1,5 +1,5 @@
"""
-CIME ERP test. This class inherits from SystemTestsCompareTwo
+CIME ERP test. This class inherits from RestartTest
This is a pes counts hybrid (open-MP/MPI) restart bfb test from
startup. This is just like an ERS test but the pe-counts/threading
@@ -10,31 +10,26 @@
from CIME.XML.standard_module_setup import *
from CIME.case_setup import case_setup
-from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
+from CIME.SystemTests.restart_tests import RestartTest
from CIME.check_lockedfiles import *
logger = logging.getLogger(__name__)
-class ERP(SystemTestsCompareTwo):
+class ERP(RestartTest):
def __init__(self, case):
"""
initialize a test object
"""
- SystemTestsCompareTwo.__init__(self, case,
- separate_builds = True,
- run_two_suffix = 'rest',
- run_one_description = 'initial',
- run_two_description = 'restart')
+ RestartTest.__init__(self, case,
+ separate_builds = True,
+ run_two_suffix = 'rest',
+ run_one_description = 'initial',
+ run_two_description = 'restart')
def _common_setup(self):
self._case.set_value("BUILD_THREADED",True)
- def _case_one_setup(self):
- stop_n = self._case.get_value("STOP_N")
-
- expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n))
-
def _case_two_setup(self):
# halve the number of tasks and threads
for comp in self._case.get_values("COMP_CLASSES"):
@@ -47,15 +42,7 @@
self._case.set_value("NTASKS_{}".format(comp), ntasks/2)
self._case.set_value("ROOTPE_{}".format(comp), rootpe/2)
- stop_n = self._case1.get_value("STOP_N")
- rest_n = self._case1.get_value("REST_N")
- stop_new = stop_n - rest_n
- expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n))
- self._case.set_value("STOP_N", stop_new)
- self._case.set_value("HIST_N", stop_n)
- self._case.set_value("CONTINUE_RUN", True)
- self._case.set_value("REST_OPTION","never")
-
+ RestartTest._case_two_setup(self)
# Note, some components, like CESM-CICE, have
# decomposition information in env_build.xml that
# needs to be regenerated for the above new tasks and thread counts
| {"golden_diff": "diff --git a/scripts/lib/CIME/SystemTests/erp.py b/scripts/lib/CIME/SystemTests/erp.py\n--- a/scripts/lib/CIME/SystemTests/erp.py\n+++ b/scripts/lib/CIME/SystemTests/erp.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-CIME ERP test. This class inherits from SystemTestsCompareTwo\n+CIME ERP test. This class inherits from RestartTest\n \n This is a pes counts hybrid (open-MP/MPI) restart bfb test from\n startup. This is just like an ERS test but the pe-counts/threading\n@@ -10,31 +10,26 @@\n \n from CIME.XML.standard_module_setup import *\n from CIME.case_setup import case_setup\n-from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\n+from CIME.SystemTests.restart_tests import RestartTest\n from CIME.check_lockedfiles import *\n \n logger = logging.getLogger(__name__)\n \n-class ERP(SystemTestsCompareTwo):\n+class ERP(RestartTest):\n \n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n- SystemTestsCompareTwo.__init__(self, case,\n- separate_builds = True,\n- run_two_suffix = 'rest',\n- run_one_description = 'initial',\n- run_two_description = 'restart')\n+ RestartTest.__init__(self, case,\n+ separate_builds = True,\n+ run_two_suffix = 'rest',\n+ run_one_description = 'initial',\n+ run_two_description = 'restart')\n \n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n \n- def _case_one_setup(self):\n- stop_n = self._case.get_value(\"STOP_N\")\n-\n- expect(stop_n > 2, \"ERROR: stop_n value {:d} too short\".format(stop_n))\n-\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n@@ -47,15 +42,7 @@\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n \n- stop_n = self._case1.get_value(\"STOP_N\")\n- rest_n = self._case1.get_value(\"REST_N\")\n- stop_new = stop_n - rest_n\n- expect(stop_new > 0, \"ERROR: stop_n value {:d} too short {:d} {:d}\".format(stop_new,stop_n,rest_n))\n- self._case.set_value(\"STOP_N\", stop_new)\n- self._case.set_value(\"HIST_N\", stop_n)\n- self._case.set_value(\"CONTINUE_RUN\", True)\n- self._case.set_value(\"REST_OPTION\",\"never\")\n-\n+ RestartTest._case_two_setup(self)\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n", "issue": "SystemTestsCompareTwo multisubmit tries to do too much in phase 1\nIn comparing #1830 with what made it to master, I noticed that the indentation of this block is wrong:\r\n\r\n```python\r\n # Compare results\r\n # Case1 is the \"main\" case, and we need to do the comparisons from there\r\n self._activate_case1()\r\n self._link_to_case2_output()\r\n\r\n self._component_compare_test(self._run_one_suffix, self._run_two_suffix, success_change=success_change)\r\n```\r\n\r\n-- this should be indented under the \"Second run\" conditional.\r\n\r\nThe current indentation leads the ERR test (and any other multi-submit test) to try to do component_compare_test after the first phase, leading to a FAIL result. This doesn't cause a test failure, because the FAIL is later overwritten with a PASS, but it is still incorrect.\r\n\r\nI have a fix for this in an incoming PR.\n", "before_files": [{"content": "\"\"\"\nCIME ERP test. This class inherits from SystemTestsCompareTwo\n\nThis is a pes counts hybrid (open-MP/MPI) restart bfb test from\nstartup. 
This is just like an ERS test but the pe-counts/threading\ncount are modified on retart.\n(1) Do an initial run with pes set up out of the box (suffix base)\n(2) Do a restart test with half the number of tasks and threads (suffix rest)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo\nfrom CIME.check_lockedfiles import *\n\nlogger = logging.getLogger(__name__)\n\nclass ERP(SystemTestsCompareTwo):\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n SystemTestsCompareTwo.__init__(self, case,\n separate_builds = True,\n run_two_suffix = 'rest',\n run_one_description = 'initial',\n run_two_description = 'restart')\n\n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n\n def _case_one_setup(self):\n stop_n = self._case.get_value(\"STOP_N\")\n\n expect(stop_n > 2, \"ERROR: stop_n value {:d} too short\".format(stop_n))\n\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n ntasks = self._case1.get_value(\"NTASKS_{}\".format(comp))\n nthreads = self._case1.get_value(\"NTHRDS_{}\".format(comp))\n rootpe = self._case1.get_value(\"ROOTPE_{}\".format(comp))\n if ( nthreads > 1 ):\n self._case.set_value(\"NTHRDS_{}\".format(comp), nthreads/2)\n if ( ntasks > 1 ):\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n\n stop_n = self._case1.get_value(\"STOP_N\")\n rest_n = self._case1.get_value(\"REST_N\")\n stop_new = stop_n - rest_n\n expect(stop_new > 0, \"ERROR: stop_n value {:d} too short {:d} {:d}\".format(stop_new,stop_n,rest_n))\n self._case.set_value(\"STOP_N\", stop_new)\n self._case.set_value(\"HIST_N\", stop_n)\n self._case.set_value(\"CONTINUE_RUN\", True)\n self._case.set_value(\"REST_OPTION\",\"never\")\n\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n case_setup(self._case, test_mode=True, reset=True)\n\n def _case_one_custom_postrun_action(self):\n self.copy_case1_restarts_to_case2()\n", "path": "scripts/lib/CIME/SystemTests/erp.py"}], "after_files": [{"content": "\"\"\"\nCIME ERP test. This class inherits from RestartTest\n\nThis is a pes counts hybrid (open-MP/MPI) restart bfb test from\nstartup. 
This is just like an ERS test but the pe-counts/threading\ncount are modified on retart.\n(1) Do an initial run with pes set up out of the box (suffix base)\n(2) Do a restart test with half the number of tasks and threads (suffix rest)\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.case_setup import case_setup\nfrom CIME.SystemTests.restart_tests import RestartTest\nfrom CIME.check_lockedfiles import *\n\nlogger = logging.getLogger(__name__)\n\nclass ERP(RestartTest):\n\n def __init__(self, case):\n \"\"\"\n initialize a test object\n \"\"\"\n RestartTest.__init__(self, case,\n separate_builds = True,\n run_two_suffix = 'rest',\n run_one_description = 'initial',\n run_two_description = 'restart')\n\n def _common_setup(self):\n self._case.set_value(\"BUILD_THREADED\",True)\n\n def _case_two_setup(self):\n # halve the number of tasks and threads\n for comp in self._case.get_values(\"COMP_CLASSES\"):\n ntasks = self._case1.get_value(\"NTASKS_{}\".format(comp))\n nthreads = self._case1.get_value(\"NTHRDS_{}\".format(comp))\n rootpe = self._case1.get_value(\"ROOTPE_{}\".format(comp))\n if ( nthreads > 1 ):\n self._case.set_value(\"NTHRDS_{}\".format(comp), nthreads/2)\n if ( ntasks > 1 ):\n self._case.set_value(\"NTASKS_{}\".format(comp), ntasks/2)\n self._case.set_value(\"ROOTPE_{}\".format(comp), rootpe/2)\n\n RestartTest._case_two_setup(self)\n # Note, some components, like CESM-CICE, have\n # decomposition information in env_build.xml that\n # needs to be regenerated for the above new tasks and thread counts\n case_setup(self._case, test_mode=True, reset=True)\n\n def _case_one_custom_postrun_action(self):\n self.copy_case1_restarts_to_case2()\n", "path": "scripts/lib/CIME/SystemTests/erp.py"}]} | 1,247 | 674 |
gh_patches_debug_15146 | rasdani/github-patches | git_diff | sunpy__sunpy-723 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make Glymur an optional dependency
This is "make io imports optional"
--- END ISSUE ---
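For context, the usual way to make a library such as Glymur optional at runtime is a guarded import that fails lazily with a clear message; the patch shown further down simply drops the requirement from `install_requires`. The snippet below is only an illustrative sketch of that general pattern — the helper name and error text are invented here, not taken from sunpy:

```python
# Illustrative pattern only (not sunpy's actual code): import Glymur lazily
# so the rest of the package still works when it is not installed.
try:
    import glymur  # optional JPEG 2000 reader
except ImportError:  # pragma: no cover - depends on the environment
    glymur = None


def read_jp2(path):
    """Hypothetical helper: read a JPEG 2000 file if Glymur is available."""
    if glymur is None:
        raise ImportError(
            "Reading JPEG 2000 files requires the optional dependency "
            "'glymur'; install it with `pip install glymur`."
        )
    return glymur.Jp2k(path)[:]
```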
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 SunPy: Python for Solar Physics
3
4 The SunPy project is an effort to create an open-source software library for
5 solar physics using the Python programming language.
6 """
7 DOCLINES = __doc__.split("\n")
8
9 CLASSIFIERS = [
10 'Development Status :: 3 - Alpha',
11 'Intended Audience :: Science/Research',
12 'Intended Audience :: Developers',
13 'License :: OSI Approved :: BSD License',
14 'Programming Language :: Python',
15 'Programming Language :: Python :: 3',
16 'Topic :: Software Development',
17 'Topic :: Scientific/Engineering',
18 'Topic :: Scientific/Engineering :: Physics',
19 'Operating System :: Microsoft :: Windows',
20 'Operating System :: POSIX',
21 'Operating System :: Unix',
22 'Operating System :: MacOS'
23 ]
24
25 VERSION = '0.3.2'
26
27 def git_description():
28 import subprocess
29 try:
30 out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]
31 description = out.strip().decode('ascii')
32 except OSError:
33 description = 'Error: could not run git'
34 return description
35
36 def write_version_py():
37 import os
38 if os.path.exists('.git'):
39 GIT_DESCRIPTION = git_description()
40 else:
41 GIT_DESCRIPTION = 'N/A'
42
43 out = open('sunpy/version.py', 'w')
44 template = """# This file is automatically generated by SunPy's setup.py
45 version = '%(version)s'
46 git_description = '%(git_description)s'
47 """
48 try:
49 out.write(template % {'version': VERSION,
50 'git_description': GIT_DESCRIPTION})
51 finally:
52 out.close()
53
54
55 def install(setup): #pylint: disable=W0621
56 from setuptools import find_packages
57 #Crotate Module
58 from distutils.core import Extension
59 from os.path import dirname, join
60 cwd = dirname(__file__)
61 try:
62 import numpy as np
63 except ImportError:
64 print("SunPy WARNING: NumPy must be installed first to build the C extension")
65
66 if 'np' in locals():
67 module = 'sunpy.image.Crotate' # import this
68 sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),
69 join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]
70 libs = ['m']
71 # -ON for compile optimise
72 gcc_args = ['-std=c99', '-O3']
73 # gcc_args = ['-std=c99']
74
75 # need *module* name here
76 crotate = Extension(module,
77 sources = sourcefiles,
78 libraries = libs,
79 extra_compile_args = gcc_args,
80 include_dirs =
81 [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]
82 )
83
84 module_ana = 'sunpy.io._pyana'
85 sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),
86 join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),
87 join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),
88 join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),
89 join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]
90
91 ana = Extension(module_ana,
92 sources = sourcefiles_ana,
93 libraries = libs,
94 extra_compile_args = gcc_args,
95 include_dirs =
96 [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]
97 )
98 ext_modules = []
99 if 'crotate' in locals():
100 ext_modules.append(crotate)
101 if 'ana' in locals():
102 ext_modules.append(ana)
103
104 write_version_py()
105
106 setup(
107 author="Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al",
108 author_email="[email protected]",
109 classifiers=CLASSIFIERS,
110 description=DOCLINES[0],
111 download_url="http://www.sunpy.org/download/",
112 # 2011/11/21: disabling for now to prevent paver warnings
113 #extra_requires={
114 # "Plotman": ['PyQt4']
115 #},
116 install_requires=[
117 'numpy>1.6.0',
118 'astropy>=0.3.0',
119 'scipy',
120 # 'suds',
121 'pandas>=0.10.0',
122 'matplotlib>=1.1',
123 'glymur>=0.5.9',
124 # 'beautifulsoup4',
125 'sqlalchemy',
126 ],
127 license="BSD",
128 long_description="\n".join(DOCLINES[2:]),
129 maintainer="SunPy Developers",
130 maintainer_email="[email protected]",
131 name="sunpy",
132 packages=find_packages(),
133 package_data={'': ['*.fits', '*.fit', 'sunpyrc']},
134 platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
135 provides=['sunpy'],
136 url="http://www.sunpy.org/",
137 use_2to3=True,
138 version=VERSION,
139 ext_modules = ext_modules
140 )
141
142 if __name__ == '__main__':
143 from distribute_setup import use_setuptools
144 use_setuptools()
145 from setuptools import setup
146 install(setup)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -109,19 +109,12 @@
classifiers=CLASSIFIERS,
description=DOCLINES[0],
download_url="http://www.sunpy.org/download/",
- # 2011/11/21: disabling for now to prevent paver warnings
- #extra_requires={
- # "Plotman": ['PyQt4']
- #},
install_requires=[
'numpy>1.6.0',
'astropy>=0.3.0',
'scipy',
- # 'suds',
'pandas>=0.10.0',
'matplotlib>=1.1',
- 'glymur>=0.5.9',
- # 'beautifulsoup4',
'sqlalchemy',
],
license="BSD",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -109,19 +109,12 @@\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n- # 2011/11/21: disabling for now to prevent paver warnings\n- #extra_requires={\n- # \"Plotman\": ['PyQt4']\n- #},\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n- # 'suds',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n- 'glymur>=0.5.9',\n- # 'beautifulsoup4',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n", "issue": "Make Glymur an optional dependancy\nThis is \"make io imports optional\"\n\n", "before_files": [{"content": "\"\"\"\nSunPy: Python for Solar Physics\n\nThe SunPy project is an effort to create an open-source software library for\nsolar physics using the Python programming language.\n\"\"\"\nDOCLINES = __doc__.split(\"\\n\")\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'\n]\n\nVERSION = '0.3.2'\n\ndef git_description():\n import subprocess\n try:\n out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]\n description = out.strip().decode('ascii')\n except OSError:\n description = 'Error: could not run git'\n return description\n\ndef write_version_py():\n import os\n if os.path.exists('.git'):\n GIT_DESCRIPTION = git_description()\n else:\n GIT_DESCRIPTION = 'N/A'\n\n out = open('sunpy/version.py', 'w')\n template = \"\"\"# This file is automatically generated by SunPy's setup.py\nversion = '%(version)s'\ngit_description = '%(git_description)s'\n\"\"\"\n try:\n out.write(template % {'version': VERSION,\n 'git_description': GIT_DESCRIPTION})\n finally:\n out.close()\n\n\ndef install(setup): #pylint: disable=W0621\n from setuptools import find_packages\n #Crotate Module\n from distutils.core import Extension\n from os.path import dirname, join\n cwd = dirname(__file__)\n try:\n import numpy as np\n except ImportError:\n print(\"SunPy WARNING: NumPy must be installed first to build the C extension\")\n\n if 'np' in locals():\n module = 'sunpy.image.Crotate' # import this\n sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),\n join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]\n libs = ['m']\n # -ON for compile optimise\n gcc_args = ['-std=c99', '-O3']\n # gcc_args = ['-std=c99']\n\n # need *module* name here\n crotate = Extension(module,\n sources = sourcefiles,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]\n )\n\n module_ana = 'sunpy.io._pyana'\n sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]\n\n ana = Extension(module_ana,\n sources = sourcefiles_ana,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]\n )\n ext_modules = []\n if 
'crotate' in locals():\n ext_modules.append(crotate)\n if 'ana' in locals():\n ext_modules.append(ana)\n\n write_version_py()\n\n setup(\n\tauthor=\"Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al\",\n author_email=\"[email protected]\",\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n # 2011/11/21: disabling for now to prevent paver warnings\n #extra_requires={\n # \"Plotman\": ['PyQt4']\n #},\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n # 'suds',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n 'glymur>=0.5.9',\n # 'beautifulsoup4',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n long_description=\"\\n\".join(DOCLINES[2:]),\n maintainer=\"SunPy Developers\",\n maintainer_email=\"[email protected]\",\n name=\"sunpy\",\n packages=find_packages(),\n package_data={'': ['*.fits', '*.fit', 'sunpyrc']},\n platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\n provides=['sunpy'],\n url=\"http://www.sunpy.org/\",\n use_2to3=True,\n version=VERSION,\n ext_modules = ext_modules\n )\n\nif __name__ == '__main__':\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import setup\n install(setup)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\nSunPy: Python for Solar Physics\n\nThe SunPy project is an effort to create an open-source software library for\nsolar physics using the Python programming language.\n\"\"\"\nDOCLINES = __doc__.split(\"\\n\")\n\nCLASSIFIERS = [\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'\n]\n\nVERSION = '0.3.2'\n\ndef git_description():\n import subprocess\n try:\n out = subprocess.Popen(['git', 'describe', '--tags'], stdout = subprocess.PIPE).communicate()[0]\n description = out.strip().decode('ascii')\n except OSError:\n description = 'Error: could not run git'\n return description\n\ndef write_version_py():\n import os\n if os.path.exists('.git'):\n GIT_DESCRIPTION = git_description()\n else:\n GIT_DESCRIPTION = 'N/A'\n\n out = open('sunpy/version.py', 'w')\n template = \"\"\"# This file is automatically generated by SunPy's setup.py\nversion = '%(version)s'\ngit_description = '%(git_description)s'\n\"\"\"\n try:\n out.write(template % {'version': VERSION,\n 'git_description': GIT_DESCRIPTION})\n finally:\n out.close()\n\n\ndef install(setup): #pylint: disable=W0621\n from setuptools import find_packages\n #Crotate Module\n from distutils.core import Extension\n from os.path import dirname, join\n cwd = dirname(__file__)\n try:\n import numpy as np\n except ImportError:\n print(\"SunPy WARNING: NumPy must be installed first to build the C extension\")\n\n if 'np' in locals():\n module = 'sunpy.image.Crotate' # import this\n sourcefiles = [join(cwd, 'sunpy', 'image', 'src', 'rot_extn.c'),\n join(cwd, 'sunpy', 'image', 'src', 'transform', 'aff_tr.c')]\n libs = ['m']\n # -ON for compile optimise\n gcc_args = ['-std=c99', '-O3']\n # gcc_args = ['-std=c99']\n\n # need *module* name here\n crotate = 
Extension(module,\n sources = sourcefiles,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'image', 'src')]\n )\n\n module_ana = 'sunpy.io._pyana'\n sourcefiles_ana = [join(cwd, 'sunpy', 'io', 'src', 'ana', 'anacompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anadecompress.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'anarw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', 'testrw.c'),\n join(cwd, 'sunpy', 'io', 'src', 'ana', '_pyana.c')]\n\n ana = Extension(module_ana,\n sources = sourcefiles_ana,\n libraries = libs,\n extra_compile_args = gcc_args,\n include_dirs =\n [np.get_include(), join(cwd, 'sunpy', 'io', 'src')]\n )\n ext_modules = []\n if 'crotate' in locals():\n ext_modules.append(crotate)\n if 'ana' in locals():\n ext_modules.append(ana)\n\n write_version_py()\n\n setup(\n\tauthor=\"Steven Christe, Matt Earnshaw, Russell Hewett, Keith Hughitt, Jack Ireland, Florian Mayer, Stuart Mumford, Albert Shih, David Perez-Suarez et. al\",\n author_email=\"[email protected]\",\n classifiers=CLASSIFIERS,\n description=DOCLINES[0],\n download_url=\"http://www.sunpy.org/download/\",\n install_requires=[\n 'numpy>1.6.0',\n 'astropy>=0.3.0',\n 'scipy',\n 'pandas>=0.10.0',\n 'matplotlib>=1.1',\n 'sqlalchemy',\n ],\n license=\"BSD\",\n long_description=\"\\n\".join(DOCLINES[2:]),\n maintainer=\"SunPy Developers\",\n maintainer_email=\"[email protected]\",\n name=\"sunpy\",\n packages=find_packages(),\n package_data={'': ['*.fits', '*.fit', 'sunpyrc']},\n platforms=[\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\n provides=['sunpy'],\n url=\"http://www.sunpy.org/\",\n use_2to3=True,\n version=VERSION,\n ext_modules = ext_modules\n )\n\nif __name__ == '__main__':\n from distribute_setup import use_setuptools\n use_setuptools()\n from setuptools import setup\n install(setup)\n", "path": "setup.py"}]} | 1,829 | 200 |
gh_patches_debug_29755 | rasdani/github-patches | git_diff | joke2k__faker-1036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Edit on Github" link broken in ReadTheDocs
http://fake-factory.readthedocs.org/en/latest/locales.html
Clicking "Edit on Github" results in a 404 error.
EDIT:
http://fake-factory.readthedocs.org/en/latest/ has a github link to `https://github.com/joke2k/faker/blob/docs/docs/index.rst` when the correct link is
`https://github.com/joke2k/faker/blob/master/docs/index.rst`
(Note the doubled up `docs/docs` instead of `master/docs`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/build_docs.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import print_function, unicode_literals
4
5 import os
6 import pprint
7 import sys
8
9 import six
10
11 DOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))
12
13
14 def write(fh, s):
15 return fh.write(s.encode('utf-8'))
16
17
18 def write_provider(fh, doc, provider, formatters, excludes=None):
19
20 if excludes is None:
21 excludes = []
22
23 write(fh, '\n')
24 title = "``{0}``".format(doc.get_provider_name(provider))
25 write(fh, '%s\n' % title)
26 write(fh, "-" * len(title))
27 write(fh, '\n\n::\n')
28
29 for signature, example in formatters.items():
30 if signature in excludes:
31 continue
32 try:
33 # `pprint` can't format sets of heterogenous types.
34 if not isinstance(example, set):
35 example = pprint.pformat(example, indent=4)
36 lines = six.text_type(example).expandtabs().splitlines()
37 except UnicodeEncodeError:
38 msg = 'error on "{0}" with value "{1}"'.format(signature, example)
39 raise Exception(msg)
40 write(fh, '\n')
41 write(fh, "\t{fake}\n{example}\n".format(
42 fake=signature,
43 example='\n'.join(['\t# ' + line for line in lines]),
44 ))
45
46
47 def write_docs(*args, **kwargs):
48 from faker import Faker, documentor
49 from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES
50
51 fake = Faker(locale=DEFAULT_LOCALE)
52
53 from faker.providers import BaseProvider
54 base_provider_formatters = [f for f in dir(BaseProvider)]
55
56 doc = documentor.Documentor(fake)
57
58 formatters = doc.get_formatters(with_args=True, with_defaults=True)
59
60 for provider, fakers in formatters:
61 provider_name = doc.get_provider_name(provider)
62 fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)
63 with open(fname, 'wb') as fh:
64 write_provider(fh, doc, provider, fakers)
65
66 with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:
67 write(fh, 'Providers\n')
68 write(fh, '=========\n')
69 write(fh, '.. toctree::\n')
70 write(fh, ' :maxdepth: 2\n\n')
71 [write(fh, ' providers/%s\n' % doc.get_provider_name(provider))
72 for provider, fakers in formatters]
73
74 AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)
75 for lang in AVAILABLE_LOCALES:
76 fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)
77 with open(fname, 'wb') as fh:
78 write(fh, '\n')
79 title = 'Language {0}\n'.format(lang)
80 write(fh, title)
81 write(fh, '=' * len(title))
82 write(fh, '\n')
83 fake = Faker(locale=lang)
84 d = documentor.Documentor(fake)
85
86 for p, fs in d.get_formatters(with_args=True, with_defaults=True,
87 locale=lang,
88 excludes=base_provider_formatters):
89 write_provider(fh, d, p, fs)
90
91 with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:
92 write(fh, 'Locales\n')
93 write(fh, '=======\n')
94 write(fh, '.. toctree::\n')
95 write(fh, ' :maxdepth: 2\n\n')
96 [write(fh, ' locales/%s\n' % lang) for lang in AVAILABLE_LOCALES]
97
98
99 # wrappers for sphinx
100 def _main(app, *args, **kwargs):
101 return write_docs(*args, **kwargs)
102
103
104 def setup(app):
105 app.connect(str('builder-inited'), _main)
106
107
108 if __name__ == "__main__":
109 write_docs(*sys.argv[1:])
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/build_docs.py b/faker/build_docs.py
--- a/faker/build_docs.py
+++ b/faker/build_docs.py
@@ -61,9 +61,11 @@
provider_name = doc.get_provider_name(provider)
fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)
with open(fname, 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write_provider(fh, doc, provider, fakers)
with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write(fh, 'Providers\n')
write(fh, '=========\n')
write(fh, '.. toctree::\n')
@@ -75,7 +77,7 @@
for lang in AVAILABLE_LOCALES:
fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)
with open(fname, 'wb') as fh:
- write(fh, '\n')
+ write(fh, ':github_url: hide\n\n')
title = 'Language {0}\n'.format(lang)
write(fh, title)
write(fh, '=' * len(title))
@@ -89,6 +91,7 @@
write_provider(fh, d, p, fs)
with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:
+ write(fh, ':github_url: hide\n\n')
write(fh, 'Locales\n')
write(fh, '=======\n')
write(fh, '.. toctree::\n')
| {"golden_diff": "diff --git a/faker/build_docs.py b/faker/build_docs.py\n--- a/faker/build_docs.py\n+++ b/faker/build_docs.py\n@@ -61,9 +61,11 @@\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n \n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n@@ -75,7 +77,7 @@\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n- write(fh, '\\n')\n+ write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n@@ -89,6 +91,7 @@\n write_provider(fh, d, p, fs)\n \n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n+ write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n", "issue": "\"Edit on Github\" link broken in ReadTheDocs\nhttp://fake-factory.readthedocs.org/en/latest/locales.html\n\nClicking \"Edit on Github\" results in a 404 error.\n\nEDIT: \nhttp://fake-factory.readthedocs.org/en/latest/ has a github link to `https://github.com/joke2k/faker/blob/docs/docs/index.rst` when the correct link is\n`https://github.com/joke2k/faker/blob/master/docs/index.rst`\n(Note the doubled up `docs/docs` instead of `master/docs`)\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, '\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}]} | 1,479 | 371 |
gh_patches_debug_26747 | rasdani/github-patches | git_diff | matrix-org__synapse-8875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove pin on prometheus-client <0.9.0
we should check that prom-client 0.9.0 doesn't break anything, and then bump the pin.
Indeed, given it's now been a few years since prom-client 0.4.0 broke the world, maybe we should remove the pin altogether?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/python_dependencies.py`
Content:
```
1 # Copyright 2015, 2016 OpenMarket Ltd
2 # Copyright 2017 Vector Creations Ltd
3 # Copyright 2018 New Vector Ltd
4 # Copyright 2020 The Matrix.org Foundation C.I.C.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 import logging
19 from typing import List, Set
20
21 from pkg_resources import (
22 DistributionNotFound,
23 Requirement,
24 VersionConflict,
25 get_provider,
26 )
27
28 logger = logging.getLogger(__name__)
29
30
31 # REQUIREMENTS is a simple list of requirement specifiers[1], and must be
32 # installed. It is passed to setup() as install_requires in setup.py.
33 #
34 # CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
35 # of lists. The dict key is the optional dependency name and can be passed to
36 # pip when installing. The list is a series of requirement specifiers[1] to be
37 # installed when that optional dependency requirement is specified. It is passed
38 # to setup() as extras_require in setup.py
39 #
40 # Note that these both represent runtime dependencies (and the versions
41 # installed are checked at runtime).
42 #
43 # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
44
45 REQUIREMENTS = [
46 "jsonschema>=2.5.1",
47 "frozendict>=1",
48 "unpaddedbase64>=1.1.0",
49 "canonicaljson>=1.4.0",
50 # we use the type definitions added in signedjson 1.1.
51 "signedjson>=1.1.0",
52 "pynacl>=1.2.1",
53 "idna>=2.5",
54 # validating SSL certs for IP addresses requires service_identity 18.1.
55 "service_identity>=18.1.0",
56 # Twisted 18.9 introduces some logger improvements that the structured
57 # logger utilises
58 "Twisted>=18.9.0",
59 "treq>=15.1",
60 # Twisted has required pyopenssl 16.0 since about Twisted 16.6.
61 "pyopenssl>=16.0.0",
62 "pyyaml>=3.11",
63 "pyasn1>=0.1.9",
64 "pyasn1-modules>=0.0.7",
65 "bcrypt>=3.1.0",
66 "pillow>=4.3.0",
67 "sortedcontainers>=1.4.4",
68 "pymacaroons>=0.13.0",
69 "msgpack>=0.5.2",
70 "phonenumbers>=8.2.0",
71 # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
72 # prom-client has a history of breaking backwards compatibility between
73 # minor versions (https://github.com/prometheus/client_python/issues/317),
74 # so we also pin the minor version.
75 #
76 # Note that we replicate these constraints in the Synapse Dockerfile while
77 # pre-installing dependencies. If these constraints are updated here, the
78 # same change should be made in the Dockerfile.
79 "prometheus_client>=0.4.0,<0.9.0",
80 # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note:
81 # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33
82 # is out in November.)
83 "attrs>=19.1.0",
84 "netaddr>=0.7.18",
85 "Jinja2>=2.9",
86 "bleach>=1.4.3",
87 "typing-extensions>=3.7.4",
88 ]
89
90 CONDITIONAL_REQUIREMENTS = {
91 "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
92 # we use execute_batch, which arrived in psycopg 2.7.
93 "postgres": ["psycopg2>=2.7"],
94 # ACME support is required to provision TLS certificates from authorities
95 # that use the protocol, such as Let's Encrypt.
96 "acme": [
97 "txacme>=0.9.2",
98 # txacme depends on eliot. Eliot 1.8.0 is incompatible with
99 # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418
100 'eliot<1.8.0;python_version<"3.5.3"',
101 ],
102 "saml2": ["pysaml2>=4.5.0"],
103 "oidc": ["authlib>=0.14.0"],
104 "systemd": ["systemd-python>=231"],
105 "url_preview": ["lxml>=3.5.0"],
106 "sentry": ["sentry-sdk>=0.7.2"],
107 "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
108 "jwt": ["pyjwt>=1.6.4"],
109 # hiredis is not a *strict* dependency, but it makes things much faster.
110 # (if it is not installed, we fall back to slow code.)
111 "redis": ["txredisapi>=1.4.7", "hiredis"],
112 }
113
114 ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]
115
116 for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
117 # Exclude systemd as it's a system-based requirement.
118 # Exclude lint as it's a dev-based requirement.
119 if name not in ["systemd"]:
120 ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
121
122
123 def list_requirements():
124 return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
125
126
127 class DependencyException(Exception):
128 @property
129 def message(self):
130 return "\n".join(
131 [
132 "Missing Requirements: %s" % (", ".join(self.dependencies),),
133 "To install run:",
134 " pip install --upgrade --force %s" % (" ".join(self.dependencies),),
135 "",
136 ]
137 )
138
139 @property
140 def dependencies(self):
141 for i in self.args[0]:
142 yield "'" + i + "'"
143
144
145 def check_requirements(for_feature=None):
146 deps_needed = []
147 errors = []
148
149 if for_feature:
150 reqs = CONDITIONAL_REQUIREMENTS[for_feature]
151 else:
152 reqs = REQUIREMENTS
153
154 for dependency in reqs:
155 try:
156 _check_requirement(dependency)
157 except VersionConflict as e:
158 deps_needed.append(dependency)
159 errors.append(
160 "Needed %s, got %s==%s"
161 % (
162 dependency,
163 e.dist.project_name, # type: ignore[attr-defined] # noqa
164 e.dist.version, # type: ignore[attr-defined] # noqa
165 )
166 )
167 except DistributionNotFound:
168 deps_needed.append(dependency)
169 if for_feature:
170 errors.append(
171 "Needed %s for the '%s' feature but it was not installed"
172 % (dependency, for_feature)
173 )
174 else:
175 errors.append("Needed %s but it was not installed" % (dependency,))
176
177 if not for_feature:
178 # Check the optional dependencies are up to date. We allow them to not be
179 # installed.
180 OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str]
181
182 for dependency in OPTS:
183 try:
184 _check_requirement(dependency)
185 except VersionConflict as e:
186 deps_needed.append(dependency)
187 errors.append(
188 "Needed optional %s, got %s==%s"
189 % (
190 dependency,
191 e.dist.project_name, # type: ignore[attr-defined] # noqa
192 e.dist.version, # type: ignore[attr-defined] # noqa
193 )
194 )
195 except DistributionNotFound:
196 # If it's not found, we don't care
197 pass
198
199 if deps_needed:
200 for err in errors:
201 logging.error(err)
202
203 raise DependencyException(deps_needed)
204
205
206 def _check_requirement(dependency_string):
207 """Parses a dependency string, and checks if the specified requirement is installed
208
209 Raises:
210 VersionConflict if the requirement is installed, but with the the wrong version
211 DistributionNotFound if nothing is found to provide the requirement
212 """
213 req = Requirement.parse(dependency_string)
214
215 # first check if the markers specify that this requirement needs installing
216 if req.marker is not None and not req.marker.evaluate():
217 # not required for this environment
218 return
219
220 get_provider(req)
221
222
223 if __name__ == "__main__":
224 import sys
225
226 sys.stdout.writelines(req + "\n" for req in list_requirements())
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -40,6 +40,10 @@
# Note that these both represent runtime dependencies (and the versions
# installed are checked at runtime).
#
+# Also note that we replicate these constraints in the Synapse Dockerfile while
+# pre-installing dependencies. If these constraints are updated here, the same
+# change should be made in the Dockerfile.
+#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
REQUIREMENTS = [
@@ -69,14 +73,7 @@
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
# we use GaugeHistogramMetric, which was added in prom-client 0.4.0.
- # prom-client has a history of breaking backwards compatibility between
- # minor versions (https://github.com/prometheus/client_python/issues/317),
- # so we also pin the minor version.
- #
- # Note that we replicate these constraints in the Synapse Dockerfile while
- # pre-installing dependencies. If these constraints are updated here, the
- # same change should be made in the Dockerfile.
- "prometheus_client>=0.4.0,<0.9.0",
+ "prometheus_client>=0.4.0",
# we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note:
# Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33
# is out in November.)
| {"golden_diff": "diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py\n--- a/synapse/python_dependencies.py\n+++ b/synapse/python_dependencies.py\n@@ -40,6 +40,10 @@\n # Note that these both represent runtime dependencies (and the versions\n # installed are checked at runtime).\n #\n+# Also note that we replicate these constraints in the Synapse Dockerfile while\n+# pre-installing dependencies. If these constraints are updated here, the same\n+# change should be made in the Dockerfile.\n+#\n # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.\n \n REQUIREMENTS = [\n@@ -69,14 +73,7 @@\n \"msgpack>=0.5.2\",\n \"phonenumbers>=8.2.0\",\n # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.\n- # prom-client has a history of breaking backwards compatibility between\n- # minor versions (https://github.com/prometheus/client_python/issues/317),\n- # so we also pin the minor version.\n- #\n- # Note that we replicate these constraints in the Synapse Dockerfile while\n- # pre-installing dependencies. If these constraints are updated here, the\n- # same change should be made in the Dockerfile.\n- \"prometheus_client>=0.4.0,<0.9.0\",\n+ \"prometheus_client>=0.4.0\",\n # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note:\n # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33\n # is out in November.)\n", "issue": "remove pin on prometheus-client <0.9.0\nwe should check that prom-client 0.9.0 doesn't break anything, and then bump the pin.\n\nIndeed, given it's now been a few years since prom-client 0.4.0 broke the world, maybe we should remove the pin altogether?\n", "before_files": [{"content": "# Copyright 2015, 2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2020 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import List, Set\n\nfrom pkg_resources import (\n DistributionNotFound,\n Requirement,\n VersionConflict,\n get_provider,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n# REQUIREMENTS is a simple list of requirement specifiers[1], and must be\n# installed. It is passed to setup() as install_requires in setup.py.\n#\n# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict\n# of lists. The dict key is the optional dependency name and can be passed to\n# pip when installing. The list is a series of requirement specifiers[1] to be\n# installed when that optional dependency requirement is specified. 
It is passed\n# to setup() as extras_require in setup.py\n#\n# Note that these both represent runtime dependencies (and the versions\n# installed are checked at runtime).\n#\n# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.\n\nREQUIREMENTS = [\n \"jsonschema>=2.5.1\",\n \"frozendict>=1\",\n \"unpaddedbase64>=1.1.0\",\n \"canonicaljson>=1.4.0\",\n # we use the type definitions added in signedjson 1.1.\n \"signedjson>=1.1.0\",\n \"pynacl>=1.2.1\",\n \"idna>=2.5\",\n # validating SSL certs for IP addresses requires service_identity 18.1.\n \"service_identity>=18.1.0\",\n # Twisted 18.9 introduces some logger improvements that the structured\n # logger utilises\n \"Twisted>=18.9.0\",\n \"treq>=15.1\",\n # Twisted has required pyopenssl 16.0 since about Twisted 16.6.\n \"pyopenssl>=16.0.0\",\n \"pyyaml>=3.11\",\n \"pyasn1>=0.1.9\",\n \"pyasn1-modules>=0.0.7\",\n \"bcrypt>=3.1.0\",\n \"pillow>=4.3.0\",\n \"sortedcontainers>=1.4.4\",\n \"pymacaroons>=0.13.0\",\n \"msgpack>=0.5.2\",\n \"phonenumbers>=8.2.0\",\n # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.\n # prom-client has a history of breaking backwards compatibility between\n # minor versions (https://github.com/prometheus/client_python/issues/317),\n # so we also pin the minor version.\n #\n # Note that we replicate these constraints in the Synapse Dockerfile while\n # pre-installing dependencies. If these constraints are updated here, the\n # same change should be made in the Dockerfile.\n \"prometheus_client>=0.4.0,<0.9.0\",\n # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note:\n # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33\n # is out in November.)\n \"attrs>=19.1.0\",\n \"netaddr>=0.7.18\",\n \"Jinja2>=2.9\",\n \"bleach>=1.4.3\",\n \"typing-extensions>=3.7.4\",\n]\n\nCONDITIONAL_REQUIREMENTS = {\n \"matrix-synapse-ldap3\": [\"matrix-synapse-ldap3>=0.1\"],\n # we use execute_batch, which arrived in psycopg 2.7.\n \"postgres\": [\"psycopg2>=2.7\"],\n # ACME support is required to provision TLS certificates from authorities\n # that use the protocol, such as Let's Encrypt.\n \"acme\": [\n \"txacme>=0.9.2\",\n # txacme depends on eliot. 
Eliot 1.8.0 is incompatible with\n # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418\n 'eliot<1.8.0;python_version<\"3.5.3\"',\n ],\n \"saml2\": [\"pysaml2>=4.5.0\"],\n \"oidc\": [\"authlib>=0.14.0\"],\n \"systemd\": [\"systemd-python>=231\"],\n \"url_preview\": [\"lxml>=3.5.0\"],\n \"sentry\": [\"sentry-sdk>=0.7.2\"],\n \"opentracing\": [\"jaeger-client>=4.0.0\", \"opentracing>=2.2.0\"],\n \"jwt\": [\"pyjwt>=1.6.4\"],\n # hiredis is not a *strict* dependency, but it makes things much faster.\n # (if it is not installed, we fall back to slow code.)\n \"redis\": [\"txredisapi>=1.4.7\", \"hiredis\"],\n}\n\nALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]\n\nfor name, optional_deps in CONDITIONAL_REQUIREMENTS.items():\n # Exclude systemd as it's a system-based requirement.\n # Exclude lint as it's a dev-based requirement.\n if name not in [\"systemd\"]:\n ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS\n\n\ndef list_requirements():\n return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)\n\n\nclass DependencyException(Exception):\n @property\n def message(self):\n return \"\\n\".join(\n [\n \"Missing Requirements: %s\" % (\", \".join(self.dependencies),),\n \"To install run:\",\n \" pip install --upgrade --force %s\" % (\" \".join(self.dependencies),),\n \"\",\n ]\n )\n\n @property\n def dependencies(self):\n for i in self.args[0]:\n yield \"'\" + i + \"'\"\n\n\ndef check_requirements(for_feature=None):\n deps_needed = []\n errors = []\n\n if for_feature:\n reqs = CONDITIONAL_REQUIREMENTS[for_feature]\n else:\n reqs = REQUIREMENTS\n\n for dependency in reqs:\n try:\n _check_requirement(dependency)\n except VersionConflict as e:\n deps_needed.append(dependency)\n errors.append(\n \"Needed %s, got %s==%s\"\n % (\n dependency,\n e.dist.project_name, # type: ignore[attr-defined] # noqa\n e.dist.version, # type: ignore[attr-defined] # noqa\n )\n )\n except DistributionNotFound:\n deps_needed.append(dependency)\n if for_feature:\n errors.append(\n \"Needed %s for the '%s' feature but it was not installed\"\n % (dependency, for_feature)\n )\n else:\n errors.append(\"Needed %s but it was not installed\" % (dependency,))\n\n if not for_feature:\n # Check the optional dependencies are up to date. 
We allow them to not be\n # installed.\n OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str]\n\n for dependency in OPTS:\n try:\n _check_requirement(dependency)\n except VersionConflict as e:\n deps_needed.append(dependency)\n errors.append(\n \"Needed optional %s, got %s==%s\"\n % (\n dependency,\n e.dist.project_name, # type: ignore[attr-defined] # noqa\n e.dist.version, # type: ignore[attr-defined] # noqa\n )\n )\n except DistributionNotFound:\n # If it's not found, we don't care\n pass\n\n if deps_needed:\n for err in errors:\n logging.error(err)\n\n raise DependencyException(deps_needed)\n\n\ndef _check_requirement(dependency_string):\n \"\"\"Parses a dependency string, and checks if the specified requirement is installed\n\n Raises:\n VersionConflict if the requirement is installed, but with the the wrong version\n DistributionNotFound if nothing is found to provide the requirement\n \"\"\"\n req = Requirement.parse(dependency_string)\n\n # first check if the markers specify that this requirement needs installing\n if req.marker is not None and not req.marker.evaluate():\n # not required for this environment\n return\n\n get_provider(req)\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.stdout.writelines(req + \"\\n\" for req in list_requirements())\n", "path": "synapse/python_dependencies.py"}], "after_files": [{"content": "# Copyright 2015, 2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2020 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import List, Set\n\nfrom pkg_resources import (\n DistributionNotFound,\n Requirement,\n VersionConflict,\n get_provider,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n# REQUIREMENTS is a simple list of requirement specifiers[1], and must be\n# installed. It is passed to setup() as install_requires in setup.py.\n#\n# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict\n# of lists. The dict key is the optional dependency name and can be passed to\n# pip when installing. The list is a series of requirement specifiers[1] to be\n# installed when that optional dependency requirement is specified. It is passed\n# to setup() as extras_require in setup.py\n#\n# Note that these both represent runtime dependencies (and the versions\n# installed are checked at runtime).\n#\n# Also note that we replicate these constraints in the Synapse Dockerfile while\n# pre-installing dependencies. 
If these constraints are updated here, the same\n# change should be made in the Dockerfile.\n#\n# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.\n\nREQUIREMENTS = [\n \"jsonschema>=2.5.1\",\n \"frozendict>=1\",\n \"unpaddedbase64>=1.1.0\",\n \"canonicaljson>=1.4.0\",\n # we use the type definitions added in signedjson 1.1.\n \"signedjson>=1.1.0\",\n \"pynacl>=1.2.1\",\n \"idna>=2.5\",\n # validating SSL certs for IP addresses requires service_identity 18.1.\n \"service_identity>=18.1.0\",\n # Twisted 18.9 introduces some logger improvements that the structured\n # logger utilises\n \"Twisted>=18.9.0\",\n \"treq>=15.1\",\n # Twisted has required pyopenssl 16.0 since about Twisted 16.6.\n \"pyopenssl>=16.0.0\",\n \"pyyaml>=3.11\",\n \"pyasn1>=0.1.9\",\n \"pyasn1-modules>=0.0.7\",\n \"bcrypt>=3.1.0\",\n \"pillow>=4.3.0\",\n \"sortedcontainers>=1.4.4\",\n \"pymacaroons>=0.13.0\",\n \"msgpack>=0.5.2\",\n \"phonenumbers>=8.2.0\",\n # we use GaugeHistogramMetric, which was added in prom-client 0.4.0.\n \"prometheus_client>=0.4.0\",\n # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note:\n # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33\n # is out in November.)\n \"attrs>=19.1.0\",\n \"netaddr>=0.7.18\",\n \"Jinja2>=2.9\",\n \"bleach>=1.4.3\",\n \"typing-extensions>=3.7.4\",\n]\n\nCONDITIONAL_REQUIREMENTS = {\n \"matrix-synapse-ldap3\": [\"matrix-synapse-ldap3>=0.1\"],\n # we use execute_batch, which arrived in psycopg 2.7.\n \"postgres\": [\"psycopg2>=2.7\"],\n # ACME support is required to provision TLS certificates from authorities\n # that use the protocol, such as Let's Encrypt.\n \"acme\": [\n \"txacme>=0.9.2\",\n # txacme depends on eliot. Eliot 1.8.0 is incompatible with\n # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418\n 'eliot<1.8.0;python_version<\"3.5.3\"',\n ],\n \"saml2\": [\"pysaml2>=4.5.0\"],\n \"oidc\": [\"authlib>=0.14.0\"],\n \"systemd\": [\"systemd-python>=231\"],\n \"url_preview\": [\"lxml>=3.5.0\"],\n \"sentry\": [\"sentry-sdk>=0.7.2\"],\n \"opentracing\": [\"jaeger-client>=4.0.0\", \"opentracing>=2.2.0\"],\n \"jwt\": [\"pyjwt>=1.6.4\"],\n # hiredis is not a *strict* dependency, but it makes things much faster.\n # (if it is not installed, we fall back to slow code.)\n \"redis\": [\"txredisapi>=1.4.7\", \"hiredis\"],\n}\n\nALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]\n\nfor name, optional_deps in CONDITIONAL_REQUIREMENTS.items():\n # Exclude systemd as it's a system-based requirement.\n # Exclude lint as it's a dev-based requirement.\n if name not in [\"systemd\"]:\n ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS\n\n\ndef list_requirements():\n return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)\n\n\nclass DependencyException(Exception):\n @property\n def message(self):\n return \"\\n\".join(\n [\n \"Missing Requirements: %s\" % (\", \".join(self.dependencies),),\n \"To install run:\",\n \" pip install --upgrade --force %s\" % (\" \".join(self.dependencies),),\n \"\",\n ]\n )\n\n @property\n def dependencies(self):\n for i in self.args[0]:\n yield \"'\" + i + \"'\"\n\n\ndef check_requirements(for_feature=None):\n deps_needed = []\n errors = []\n\n if for_feature:\n reqs = CONDITIONAL_REQUIREMENTS[for_feature]\n else:\n reqs = REQUIREMENTS\n\n for dependency in reqs:\n try:\n _check_requirement(dependency)\n except VersionConflict as e:\n deps_needed.append(dependency)\n errors.append(\n \"Needed %s, got %s==%s\"\n % (\n dependency,\n 
e.dist.project_name, # type: ignore[attr-defined] # noqa\n e.dist.version, # type: ignore[attr-defined] # noqa\n )\n )\n except DistributionNotFound:\n deps_needed.append(dependency)\n if for_feature:\n errors.append(\n \"Needed %s for the '%s' feature but it was not installed\"\n % (dependency, for_feature)\n )\n else:\n errors.append(\"Needed %s but it was not installed\" % (dependency,))\n\n if not for_feature:\n # Check the optional dependencies are up to date. We allow them to not be\n # installed.\n OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), []) # type: List[str]\n\n for dependency in OPTS:\n try:\n _check_requirement(dependency)\n except VersionConflict as e:\n deps_needed.append(dependency)\n errors.append(\n \"Needed optional %s, got %s==%s\"\n % (\n dependency,\n e.dist.project_name, # type: ignore[attr-defined] # noqa\n e.dist.version, # type: ignore[attr-defined] # noqa\n )\n )\n except DistributionNotFound:\n # If it's not found, we don't care\n pass\n\n if deps_needed:\n for err in errors:\n logging.error(err)\n\n raise DependencyException(deps_needed)\n\n\ndef _check_requirement(dependency_string):\n \"\"\"Parses a dependency string, and checks if the specified requirement is installed\n\n Raises:\n VersionConflict if the requirement is installed, but with the the wrong version\n DistributionNotFound if nothing is found to provide the requirement\n \"\"\"\n req = Requirement.parse(dependency_string)\n\n # first check if the markers specify that this requirement needs installing\n if req.marker is not None and not req.marker.evaluate():\n # not required for this environment\n return\n\n get_provider(req)\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.stdout.writelines(req + \"\\n\" for req in list_requirements())\n", "path": "synapse/python_dependencies.py"}]} | 2,931 | 385 |
gh_patches_debug_19486 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pytorch<1.8 not working despite requirements.txt
## 🐛 Bug
In `requirements.txt` you depend on torch>=1.3.1, but while trying to use torchmetrics with torch 1.4 I encountered an error in https://github.com/PyTorchLightning/metrics/blob/master/torchmetrics/utilities/data.py#L242, because the `are_deterministic_algorithms_enabled` function is only available for torch>=1.8
### To Reproduce
Install torchmetrics and torch<1.8 and run the provided script.
#### Code sample
```python
import torch
from torchmetrics.functional import confusion_matrix
x = torch.tensor([[1, 2, 3], [1, 2, 3]]).cuda()
y = torch.tensor([[1, 2, 3], [1, 2, 3]]).cuda()
confusion_matrix(x, y, num_classes=4)
```
### Expected behavior
Should run.
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): pip install torchmetrics~=0.7.2
- Python & PyTorch Version (e.g., 1.0): python3.7 and pytorch1.4
- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchmetrics/utilities/data.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Union
15
16 import torch
17 from torch import Tensor, tensor
18
19 METRIC_EPS = 1e-6
20
21
22 def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:
23 """Concatenation along the zero dimension."""
24 x = x if isinstance(x, (list, tuple)) else [x]
25 x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]
26 if not x: # empty list
27 raise ValueError("No samples to concatenate")
28 return torch.cat(x, dim=0)
29
30
31 def dim_zero_sum(x: Tensor) -> Tensor:
32 """Summation along the zero dimension."""
33 return torch.sum(x, dim=0)
34
35
36 def dim_zero_mean(x: Tensor) -> Tensor:
37 """Average along the zero dimension."""
38 return torch.mean(x, dim=0)
39
40
41 def dim_zero_max(x: Tensor) -> Tensor:
42 """Max along the zero dimension."""
43 return torch.max(x, dim=0).values
44
45
46 def dim_zero_min(x: Tensor) -> Tensor:
47 """Min along the zero dimension."""
48 return torch.min(x, dim=0).values
49
50
51 def _flatten(x: Sequence) -> list:
52 """Flatten list of list into single list."""
53 return [item for sublist in x for item in sublist]
54
55
56 def _flatten_dict(x: Dict) -> Dict:
57 """Flatten dict of dicts into single dict."""
58 new_dict = {}
59 for key, value in x.items():
60 if isinstance(value, dict):
61 for k, v in value.items():
62 new_dict[k] = v
63 else:
64 new_dict[key] = value
65 return new_dict
66
67
68 def to_onehot(
69 label_tensor: Tensor,
70 num_classes: Optional[int] = None,
71 ) -> Tensor:
72 """Converts a dense label tensor to one-hot format.
73
74 Args:
75 label_tensor: dense label tensor, with shape [N, d1, d2, ...]
76 num_classes: number of classes C
77
78 Returns:
79 A sparse label tensor with shape [N, C, d1, d2, ...]
80
81 Example:
82 >>> x = torch.tensor([1, 2, 3])
83 >>> to_onehot(x)
84 tensor([[0, 1, 0, 0],
85 [0, 0, 1, 0],
86 [0, 0, 0, 1]])
87 """
88 if num_classes is None:
89 num_classes = int(label_tensor.max().detach().item() + 1)
90
91 tensor_onehot = torch.zeros(
92 label_tensor.shape[0],
93 num_classes,
94 *label_tensor.shape[1:],
95 dtype=label_tensor.dtype,
96 device=label_tensor.device,
97 )
98 index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)
99 return tensor_onehot.scatter_(1, index, 1.0)
100
101
102 def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:
103 """Convert a probability tensor to binary by selecting top-k the highest entries.
104
105 Args:
106 prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the
107 position defined by the ``dim`` argument
108 topk: number of the highest entries to turn into 1s
109 dim: dimension on which to compare entries
110
111 Returns:
112 A binary tensor of the same shape as the input tensor of type ``torch.int32``
113
114 Example:
115 >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])
116 >>> select_topk(x, topk=2)
117 tensor([[0, 1, 1],
118 [1, 1, 0]], dtype=torch.int32)
119 """
120 zeros = torch.zeros_like(prob_tensor)
121 if topk == 1: # argmax has better performance than topk
122 topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)
123 else:
124 topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
125 return topk_tensor.int()
126
127
128 def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:
129 """Converts a tensor of probabilities to a dense label tensor.
130
131 Args:
132 x: probabilities to get the categorical label [N, d1, d2, ...]
133 argmax_dim: dimension to apply
134
135 Return:
136 A tensor with categorical labels [N, d2, ...]
137
138 Example:
139 >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])
140 >>> to_categorical(x)
141 tensor([1, 0])
142 """
143 return torch.argmax(x, dim=argmax_dim)
144
145
146 def apply_to_collection(
147 data: Any,
148 dtype: Union[type, tuple],
149 function: Callable,
150 *args: Any,
151 wrong_dtype: Optional[Union[type, tuple]] = None,
152 **kwargs: Any,
153 ) -> Any:
154 """Recursively applies a function to all elements of a certain dtype.
155
156 Args:
157 data: the collection to apply the function to
158 dtype: the given function will be applied to all elements of this dtype
159 function: the function to apply
160 *args: positional arguments (will be forwarded to call of ``function``)
161 wrong_dtype: the given function won't be applied if this type is specified and the given collections is of
162 the :attr:`wrong_type` even if it is of type :attr`dtype`
163 **kwargs: keyword arguments (will be forwarded to call of ``function``)
164
165 Returns:
166 the resulting collection
167
168 Example:
169 >>> apply_to_collection(torch.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2)
170 tensor([64, 0, 4, 36, 49])
171 >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2)
172 [64, 0, 4, 36, 49]
173 >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2)
174 {'abc': 15129}
175 """
176 elem_type = type(data)
177
178 # Breaking condition
179 if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):
180 return function(data, *args, **kwargs)
181
182 # Recursively apply to collection items
183 if isinstance(data, Mapping):
184 return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()})
185
186 if isinstance(data, tuple) and hasattr(data, "_fields"): # named tuple
187 return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data))
188
189 if isinstance(data, Sequence) and not isinstance(data, str):
190 return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data])
191
192 # data is neither of dtype, nor a collection
193 return data
194
195
196 def get_group_indexes(indexes: Tensor) -> List[Tensor]:
197 """Given an integer ``indexes``, return indexes for each different value in ``indexes``.
198
199 Args:
200 indexes:
201
202 Return:
203 A list of integer ``torch.Tensor``s
204
205 Example:
206 >>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])
207 >>> get_group_indexes(indexes)
208 [tensor([0, 1, 2]), tensor([3, 4, 5, 6])]
209 """
210
211 res: dict = {}
212 for i, _id in enumerate(indexes):
213 _id = _id.item()
214 if _id in res:
215 res[_id] += [i]
216 else:
217 res[_id] = [i]
218
219 return [tensor(x, dtype=torch.long) for x in res.values()]
220
221
222 def _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:
223 return x.squeeze() if x.numel() == 1 else x
224
225
226 def _squeeze_if_scalar(data: Any) -> Any:
227 return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)
228
229
230 def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:
231 """``torch.bincount`` currently does not support deterministic mode on GPU.
232
233 This implementation fallback to a for-loop counting occurrences in that case.
234
235 Args:
236 x: tensor to count
237 minlength: minimum length to count
238
239 Returns:
240 Number of occurrences for each unique element in x
241 """
242 if x.is_cuda and torch.are_deterministic_algorithms_enabled():
243 if minlength is None:
244 minlength = len(torch.unique(x))
245 output = torch.zeros(minlength, device=x.device, dtype=torch.long)
246 for i in range(minlength):
247 output[i] = (x == i).sum()
248 return output
249 else:
250 return torch.bincount(x, minlength=minlength)
251
252
253 def allclose(tensor1: Tensor, tensor2: Tensor) -> bool:
254 """Wrapper of torch.allclose that is robust towards dtype difference."""
255 if tensor1.dtype != tensor2.dtype:
256 tensor2 = tensor2.to(dtype=tensor1.dtype)
257 return torch.allclose(tensor1, tensor2)
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchmetrics/utilities/data.py b/torchmetrics/utilities/data.py
--- a/torchmetrics/utilities/data.py
+++ b/torchmetrics/utilities/data.py
@@ -16,6 +16,20 @@
import torch
from torch import Tensor, tensor
+from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6, _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8
+
+if _TORCH_GREATER_EQUAL_1_8:
+ deterministic = torch.are_deterministic_algorithms_enabled
+elif _TORCH_GREATER_EQUAL_1_7:
+ deterministic = torch.is_deterministic
+elif _TORCH_GREATER_EQUAL_1_6:
+ deterministic = torch._is_deterministic
+else:
+
+ def deterministic() -> bool:
+ return True
+
+
METRIC_EPS = 1e-6
@@ -239,7 +253,7 @@
Returns:
Number of occurrences for each unique element in x
"""
- if x.is_cuda and torch.are_deterministic_algorithms_enabled():
+ if x.is_cuda and deterministic():
if minlength is None:
minlength = len(torch.unique(x))
output = torch.zeros(minlength, device=x.device, dtype=torch.long)
| {"golden_diff": "diff --git a/torchmetrics/utilities/data.py b/torchmetrics/utilities/data.py\n--- a/torchmetrics/utilities/data.py\n+++ b/torchmetrics/utilities/data.py\n@@ -16,6 +16,20 @@\n import torch\n from torch import Tensor, tensor\n \n+from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6, _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8\n+\n+if _TORCH_GREATER_EQUAL_1_8:\n+ deterministic = torch.are_deterministic_algorithms_enabled\n+elif _TORCH_GREATER_EQUAL_1_7:\n+ deterministic = torch.is_deterministic\n+elif _TORCH_GREATER_EQUAL_1_6:\n+ deterministic = torch._is_deterministic\n+else:\n+\n+ def deterministic() -> bool:\n+ return True\n+\n+\n METRIC_EPS = 1e-6\n \n \n@@ -239,7 +253,7 @@\n Returns:\n Number of occurrences for each unique element in x\n \"\"\"\n- if x.is_cuda and torch.are_deterministic_algorithms_enabled():\n+ if x.is_cuda and deterministic():\n if minlength is None:\n minlength = len(torch.unique(x))\n output = torch.zeros(minlength, device=x.device, dtype=torch.long)\n", "issue": "Pytorch<1.8 not working despite requirements.txt\n## \ud83d\udc1b Bug\r\n\r\nIn `requirements.txt` you depend on torch>=1.3.1 but while trying to use torchmetrics with torch1.4 I encountered an error in https://github.com/PyTorchLightning/metrics/blob/master/torchmetrics/utilities/data.py#L242 because `are_deterministic_algorithms_enabled` function is only available for torch>=1.8\r\n\r\n### To Reproduce\r\n\r\nInstall torchmetrics and torch<1.8 and run the provided script.\r\n\r\n\r\n\r\n#### Code sample\r\n\r\n```python\r\nimport torch\r\nfrom torchmetrics.functional import confusion_matrix\r\nx = torch.tensor([[1, 2, 3], [1, 2, 3]]).cuda()\r\ny = torch.tensor([[1, 2, 3], [1, 2, 3]]).cuda()\r\nconfusion_matrix(x, y, num_classes=4)\r\n```\r\n\r\n### Expected behavior\r\n\r\nShould run.\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. 
`conda`, `pip`, build from source): pip install torchmetrics~=0.7.2 \r\n- Python & PyTorch Version (e.g., 1.0): python3.7 and pytorch1.4\r\n- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nMETRIC_EPS = 1e-6\n\n\ndef dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:\n \"\"\"Concatenation along the zero dimension.\"\"\"\n x = x if isinstance(x, (list, tuple)) else [x]\n x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]\n if not x: # empty list\n raise ValueError(\"No samples to concatenate\")\n return torch.cat(x, dim=0)\n\n\ndef dim_zero_sum(x: Tensor) -> Tensor:\n \"\"\"Summation along the zero dimension.\"\"\"\n return torch.sum(x, dim=0)\n\n\ndef dim_zero_mean(x: Tensor) -> Tensor:\n \"\"\"Average along the zero dimension.\"\"\"\n return torch.mean(x, dim=0)\n\n\ndef dim_zero_max(x: Tensor) -> Tensor:\n \"\"\"Max along the zero dimension.\"\"\"\n return torch.max(x, dim=0).values\n\n\ndef dim_zero_min(x: Tensor) -> Tensor:\n \"\"\"Min along the zero dimension.\"\"\"\n return torch.min(x, dim=0).values\n\n\ndef _flatten(x: Sequence) -> list:\n \"\"\"Flatten list of list into single list.\"\"\"\n return [item for sublist in x for item in sublist]\n\n\ndef _flatten_dict(x: Dict) -> Dict:\n \"\"\"Flatten dict of dicts into single dict.\"\"\"\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return new_dict\n\n\ndef to_onehot(\n label_tensor: Tensor,\n num_classes: Optional[int] = None,\n) -> Tensor:\n \"\"\"Converts a dense label tensor to one-hot format.\n\n Args:\n label_tensor: dense label tensor, with shape [N, d1, d2, ...]\n num_classes: number of classes C\n\n Returns:\n A sparse label tensor with shape [N, C, d1, d2, ...]\n\n Example:\n >>> x = torch.tensor([1, 2, 3])\n >>> to_onehot(x)\n tensor([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n \"\"\"\n if num_classes is None:\n num_classes = int(label_tensor.max().detach().item() + 1)\n\n tensor_onehot = torch.zeros(\n label_tensor.shape[0],\n num_classes,\n *label_tensor.shape[1:],\n dtype=label_tensor.dtype,\n device=label_tensor.device,\n )\n index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)\n return tensor_onehot.scatter_(1, index, 1.0)\n\n\ndef select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n\n Args:\n prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the\n position defined by the ``dim`` argument\n topk: number of the highest entries to turn into 1s\n dim: dimension on which to compare entries\n\n Returns:\n A binary tensor of the same shape as the input tensor 
of type ``torch.int32``\n\n Example:\n >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])\n >>> select_topk(x, topk=2)\n tensor([[0, 1, 1],\n [1, 1, 0]], dtype=torch.int32)\n \"\"\"\n zeros = torch.zeros_like(prob_tensor)\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n return topk_tensor.int()\n\n\ndef to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:\n \"\"\"Converts a tensor of probabilities to a dense label tensor.\n\n Args:\n x: probabilities to get the categorical label [N, d1, d2, ...]\n argmax_dim: dimension to apply\n\n Return:\n A tensor with categorical labels [N, d2, ...]\n\n Example:\n >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])\n >>> to_categorical(x)\n tensor([1, 0])\n \"\"\"\n return torch.argmax(x, dim=argmax_dim)\n\n\ndef apply_to_collection(\n data: Any,\n dtype: Union[type, tuple],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, tuple]] = None,\n **kwargs: Any,\n) -> Any:\n \"\"\"Recursively applies a function to all elements of a certain dtype.\n\n Args:\n data: the collection to apply the function to\n dtype: the given function will be applied to all elements of this dtype\n function: the function to apply\n *args: positional arguments (will be forwarded to call of ``function``)\n wrong_dtype: the given function won't be applied if this type is specified and the given collections is of\n the :attr:`wrong_type` even if it is of type :attr`dtype`\n **kwargs: keyword arguments (will be forwarded to call of ``function``)\n\n Returns:\n the resulting collection\n\n Example:\n >>> apply_to_collection(torch.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2)\n tensor([64, 0, 4, 36, 49])\n >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2)\n [64, 0, 4, 36, 49]\n >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2)\n {'abc': 15129}\n \"\"\"\n elem_type = type(data)\n\n # Breaking condition\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, **kwargs)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()})\n\n if isinstance(data, tuple) and hasattr(data, \"_fields\"): # named tuple\n return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data))\n\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data])\n\n # data is neither of dtype, nor a collection\n return data\n\n\ndef get_group_indexes(indexes: Tensor) -> List[Tensor]:\n \"\"\"Given an integer ``indexes``, return indexes for each different value in ``indexes``.\n\n Args:\n indexes:\n\n Return:\n A list of integer ``torch.Tensor``s\n\n Example:\n >>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])\n >>> get_group_indexes(indexes)\n [tensor([0, 1, 2]), tensor([3, 4, 5, 6])]\n \"\"\"\n\n res: dict = {}\n for i, _id in enumerate(indexes):\n _id = _id.item()\n if _id in res:\n res[_id] += [i]\n else:\n res[_id] = [i]\n\n return [tensor(x, dtype=torch.long) for x in res.values()]\n\n\ndef _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:\n return x.squeeze() if x.numel() == 1 else x\n\n\ndef _squeeze_if_scalar(data: 
Any) -> Any:\n return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)\n\n\ndef _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:\n \"\"\"``torch.bincount`` currently does not support deterministic mode on GPU.\n\n This implementation fallback to a for-loop counting occurrences in that case.\n\n Args:\n x: tensor to count\n minlength: minimum length to count\n\n Returns:\n Number of occurrences for each unique element in x\n \"\"\"\n if x.is_cuda and torch.are_deterministic_algorithms_enabled():\n if minlength is None:\n minlength = len(torch.unique(x))\n output = torch.zeros(minlength, device=x.device, dtype=torch.long)\n for i in range(minlength):\n output[i] = (x == i).sum()\n return output\n else:\n return torch.bincount(x, minlength=minlength)\n\n\ndef allclose(tensor1: Tensor, tensor2: Tensor) -> bool:\n \"\"\"Wrapper of torch.allclose that is robust towards dtype difference.\"\"\"\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2.to(dtype=tensor1.dtype)\n return torch.allclose(tensor1, tensor2)\n", "path": "torchmetrics/utilities/data.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_6, _TORCH_GREATER_EQUAL_1_7, _TORCH_GREATER_EQUAL_1_8\n\nif _TORCH_GREATER_EQUAL_1_8:\n deterministic = torch.are_deterministic_algorithms_enabled\nelif _TORCH_GREATER_EQUAL_1_7:\n deterministic = torch.is_deterministic\nelif _TORCH_GREATER_EQUAL_1_6:\n deterministic = torch._is_deterministic\nelse:\n\n def deterministic() -> bool:\n return True\n\n\nMETRIC_EPS = 1e-6\n\n\ndef dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:\n \"\"\"Concatenation along the zero dimension.\"\"\"\n x = x if isinstance(x, (list, tuple)) else [x]\n x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]\n if not x: # empty list\n raise ValueError(\"No samples to concatenate\")\n return torch.cat(x, dim=0)\n\n\ndef dim_zero_sum(x: Tensor) -> Tensor:\n \"\"\"Summation along the zero dimension.\"\"\"\n return torch.sum(x, dim=0)\n\n\ndef dim_zero_mean(x: Tensor) -> Tensor:\n \"\"\"Average along the zero dimension.\"\"\"\n return torch.mean(x, dim=0)\n\n\ndef dim_zero_max(x: Tensor) -> Tensor:\n \"\"\"Max along the zero dimension.\"\"\"\n return torch.max(x, dim=0).values\n\n\ndef dim_zero_min(x: Tensor) -> Tensor:\n \"\"\"Min along the zero dimension.\"\"\"\n return torch.min(x, dim=0).values\n\n\ndef _flatten(x: Sequence) -> list:\n \"\"\"Flatten list of list into single list.\"\"\"\n return [item for sublist in x for item in sublist]\n\n\ndef _flatten_dict(x: Dict) -> Dict:\n \"\"\"Flatten dict of dicts into single dict.\"\"\"\n new_dict = {}\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n new_dict[k] = v\n else:\n new_dict[key] = value\n return 
new_dict\n\n\ndef to_onehot(\n label_tensor: Tensor,\n num_classes: Optional[int] = None,\n) -> Tensor:\n \"\"\"Converts a dense label tensor to one-hot format.\n\n Args:\n label_tensor: dense label tensor, with shape [N, d1, d2, ...]\n num_classes: number of classes C\n\n Returns:\n A sparse label tensor with shape [N, C, d1, d2, ...]\n\n Example:\n >>> x = torch.tensor([1, 2, 3])\n >>> to_onehot(x)\n tensor([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n \"\"\"\n if num_classes is None:\n num_classes = int(label_tensor.max().detach().item() + 1)\n\n tensor_onehot = torch.zeros(\n label_tensor.shape[0],\n num_classes,\n *label_tensor.shape[1:],\n dtype=label_tensor.dtype,\n device=label_tensor.device,\n )\n index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)\n return tensor_onehot.scatter_(1, index, 1.0)\n\n\ndef select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n\n Args:\n prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the\n position defined by the ``dim`` argument\n topk: number of the highest entries to turn into 1s\n dim: dimension on which to compare entries\n\n Returns:\n A binary tensor of the same shape as the input tensor of type ``torch.int32``\n\n Example:\n >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])\n >>> select_topk(x, topk=2)\n tensor([[0, 1, 1],\n [1, 1, 0]], dtype=torch.int32)\n \"\"\"\n zeros = torch.zeros_like(prob_tensor)\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n return topk_tensor.int()\n\n\ndef to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:\n \"\"\"Converts a tensor of probabilities to a dense label tensor.\n\n Args:\n x: probabilities to get the categorical label [N, d1, d2, ...]\n argmax_dim: dimension to apply\n\n Return:\n A tensor with categorical labels [N, d2, ...]\n\n Example:\n >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])\n >>> to_categorical(x)\n tensor([1, 0])\n \"\"\"\n return torch.argmax(x, dim=argmax_dim)\n\n\ndef apply_to_collection(\n data: Any,\n dtype: Union[type, tuple],\n function: Callable,\n *args: Any,\n wrong_dtype: Optional[Union[type, tuple]] = None,\n **kwargs: Any,\n) -> Any:\n \"\"\"Recursively applies a function to all elements of a certain dtype.\n\n Args:\n data: the collection to apply the function to\n dtype: the given function will be applied to all elements of this dtype\n function: the function to apply\n *args: positional arguments (will be forwarded to call of ``function``)\n wrong_dtype: the given function won't be applied if this type is specified and the given collections is of\n the :attr:`wrong_type` even if it is of type :attr`dtype`\n **kwargs: keyword arguments (will be forwarded to call of ``function``)\n\n Returns:\n the resulting collection\n\n Example:\n >>> apply_to_collection(torch.tensor([8, 0, 2, 6, 7]), dtype=Tensor, function=lambda x: x ** 2)\n tensor([64, 0, 4, 36, 49])\n >>> apply_to_collection([8, 0, 2, 6, 7], dtype=int, function=lambda x: x ** 2)\n [64, 0, 4, 36, 49]\n >>> apply_to_collection(dict(abc=123), dtype=int, function=lambda x: x ** 2)\n {'abc': 15129}\n \"\"\"\n elem_type = type(data)\n\n # Breaking condition\n if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):\n return function(data, *args, 
**kwargs)\n\n # Recursively apply to collection items\n if isinstance(data, Mapping):\n return elem_type({k: apply_to_collection(v, dtype, function, *args, **kwargs) for k, v in data.items()})\n\n if isinstance(data, tuple) and hasattr(data, \"_fields\"): # named tuple\n return elem_type(*(apply_to_collection(d, dtype, function, *args, **kwargs) for d in data))\n\n if isinstance(data, Sequence) and not isinstance(data, str):\n return elem_type([apply_to_collection(d, dtype, function, *args, **kwargs) for d in data])\n\n # data is neither of dtype, nor a collection\n return data\n\n\ndef get_group_indexes(indexes: Tensor) -> List[Tensor]:\n \"\"\"Given an integer ``indexes``, return indexes for each different value in ``indexes``.\n\n Args:\n indexes:\n\n Return:\n A list of integer ``torch.Tensor``s\n\n Example:\n >>> indexes = torch.tensor([0, 0, 0, 1, 1, 1, 1])\n >>> get_group_indexes(indexes)\n [tensor([0, 1, 2]), tensor([3, 4, 5, 6])]\n \"\"\"\n\n res: dict = {}\n for i, _id in enumerate(indexes):\n _id = _id.item()\n if _id in res:\n res[_id] += [i]\n else:\n res[_id] = [i]\n\n return [tensor(x, dtype=torch.long) for x in res.values()]\n\n\ndef _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:\n return x.squeeze() if x.numel() == 1 else x\n\n\ndef _squeeze_if_scalar(data: Any) -> Any:\n return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)\n\n\ndef _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:\n \"\"\"``torch.bincount`` currently does not support deterministic mode on GPU.\n\n This implementation fallback to a for-loop counting occurrences in that case.\n\n Args:\n x: tensor to count\n minlength: minimum length to count\n\n Returns:\n Number of occurrences for each unique element in x\n \"\"\"\n if x.is_cuda and deterministic():\n if minlength is None:\n minlength = len(torch.unique(x))\n output = torch.zeros(minlength, device=x.device, dtype=torch.long)\n for i in range(minlength):\n output[i] = (x == i).sum()\n return output\n else:\n return torch.bincount(x, minlength=minlength)\n\n\ndef allclose(tensor1: Tensor, tensor2: Tensor) -> bool:\n \"\"\"Wrapper of torch.allclose that is robust towards dtype difference.\"\"\"\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2.to(dtype=tensor1.dtype)\n return torch.allclose(tensor1, tensor2)\n", "path": "torchmetrics/utilities/data.py"}]} | 3,484 | 293 |
gh_patches_debug_8945 | rasdani/github-patches | git_diff | open-mmlab__mmaction2-624 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS
Hello,
when I train on the AVA dataset I get the following error:
ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS
How can I solve it?
Thank you very much.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmaction/models/builder.py`
Content:
```
1 import torch.nn as nn
2 from mmcv.utils import Registry, build_from_cfg
3
4 from mmaction.utils import import_module_error_func
5 from .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS
6
7 try:
8 from mmdet.models.builder import DETECTORS, build_detector
9 except (ImportError, ModuleNotFoundError):
10 # Define an empty registry and building func, so that can import
11 DETECTORS = Registry('detector')
12
13 @import_module_error_func('mmdet')
14 def build_detector(cfg, train_cfg, test_cfg):
15 pass
16
17
18 def build(cfg, registry, default_args=None):
19 """Build a module.
20
21 Args:
22 cfg (dict, list[dict]): The config of modules, it is either a dict
23 or a list of configs.
24 registry (:obj:`Registry`): A registry the module belongs to.
25 default_args (dict, optional): Default arguments to build the module.
26 Defaults to None.
27
28 Returns:
29 nn.Module: A built nn module.
30 """
31
32 if isinstance(cfg, list):
33 modules = [
34 build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
35 ]
36 return nn.Sequential(*modules)
37
38 return build_from_cfg(cfg, registry, default_args)
39
40
41 def build_backbone(cfg):
42 """Build backbone."""
43 return build(cfg, BACKBONES)
44
45
46 def build_head(cfg):
47 """Build head."""
48 return build(cfg, HEADS)
49
50
51 def build_recognizer(cfg, train_cfg=None, test_cfg=None):
52 """Build recognizer."""
53 return build(cfg, RECOGNIZERS,
54 dict(train_cfg=train_cfg, test_cfg=test_cfg))
55
56
57 def build_loss(cfg):
58 """Build loss."""
59 return build(cfg, LOSSES)
60
61
62 def build_localizer(cfg):
63 """Build localizer."""
64 return build(cfg, LOCALIZERS)
65
66
67 def build_model(cfg, train_cfg=None, test_cfg=None):
68 """Build model."""
69 args = cfg.copy()
70 obj_type = args.pop('type')
71 if obj_type in LOCALIZERS:
72 return build_localizer(cfg)
73 if obj_type in RECOGNIZERS:
74 return build_recognizer(cfg, train_cfg, test_cfg)
75 if obj_type in DETECTORS:
76 return build_detector(cfg, train_cfg, test_cfg)
77 raise ValueError(f'{obj_type} is not registered in '
78 'LOCALIZERS, RECOGNIZERS or DETECTORS')
79
80
81 def build_neck(cfg):
82 """Build neck."""
83 return build(cfg, NECKS)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmaction/models/builder.py b/mmaction/models/builder.py
--- a/mmaction/models/builder.py
+++ b/mmaction/models/builder.py
@@ -74,6 +74,10 @@
return build_recognizer(cfg, train_cfg, test_cfg)
if obj_type in DETECTORS:
return build_detector(cfg, train_cfg, test_cfg)
+ model_in_mmdet = ['FastRCNN']
+ if obj_type in model_in_mmdet:
+ raise ImportError(
+ 'Please install mmdet for spatial temporal detection tasks.')
raise ValueError(f'{obj_type} is not registered in '
'LOCALIZERS, RECOGNIZERS or DETECTORS')
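A condensed paraphrase of the patched dispatch logic from the diff above; the standalone helper below is illustrative only and not part of the codebase:
```python
# Paraphrase of the patched build_model() control flow shown in the diff above.
def build_model_dispatch(obj_type, localizers, recognizers, detectors):
    """Illustrative only: returns which branch the patched build_model takes."""
    if obj_type in localizers:
        return 'build_localizer'
    if obj_type in recognizers:
        return 'build_recognizer'
    if obj_type in detectors:
        return 'build_detector'
    if obj_type in ['FastRCNN']:  # model_in_mmdet
        raise ImportError(
            'Please install mmdet for spatial temporal detection tasks.')
    raise ValueError(f'{obj_type} is not registered in '
                     'LOCALIZERS, RECOGNIZERS or DETECTORS')

# With mmdet absent, the DETECTORS registry stays empty, so 'FastRCNN' now yields
# a clear ImportError instead of the confusing ValueError from the issue.
```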
| {"golden_diff": "diff --git a/mmaction/models/builder.py b/mmaction/models/builder.py\n--- a/mmaction/models/builder.py\n+++ b/mmaction/models/builder.py\n@@ -74,6 +74,10 @@\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n+ model_in_mmdet = ['FastRCNN']\n+ if obj_type in model_in_mmdet:\n+ raise ImportError(\n+ 'Please install mmdet for spatial temporal detection tasks.')\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n", "issue": "ValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS\nHello\r\nwhen I train AVA dataset find a error\r\nValueError: FastRCNN is not registered in LOCALIZERS, RECOGNIZERS or DETECTORS\r\nhow to solve it \r\nthink you very much \n", "before_files": [{"content": "import torch.nn as nn\nfrom mmcv.utils import Registry, build_from_cfg\n\nfrom mmaction.utils import import_module_error_func\nfrom .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS\n\ntry:\n from mmdet.models.builder import DETECTORS, build_detector\nexcept (ImportError, ModuleNotFoundError):\n # Define an empty registry and building func, so that can import\n DETECTORS = Registry('detector')\n\n @import_module_error_func('mmdet')\n def build_detector(cfg, train_cfg, test_cfg):\n pass\n\n\ndef build(cfg, registry, default_args=None):\n \"\"\"Build a module.\n\n Args:\n cfg (dict, list[dict]): The config of modules, it is either a dict\n or a list of configs.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n \"\"\"\n\n if isinstance(cfg, list):\n modules = [\n build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg\n ]\n return nn.Sequential(*modules)\n\n return build_from_cfg(cfg, registry, default_args)\n\n\ndef build_backbone(cfg):\n \"\"\"Build backbone.\"\"\"\n return build(cfg, BACKBONES)\n\n\ndef build_head(cfg):\n \"\"\"Build head.\"\"\"\n return build(cfg, HEADS)\n\n\ndef build_recognizer(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build recognizer.\"\"\"\n return build(cfg, RECOGNIZERS,\n dict(train_cfg=train_cfg, test_cfg=test_cfg))\n\n\ndef build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return build(cfg, LOSSES)\n\n\ndef build_localizer(cfg):\n \"\"\"Build localizer.\"\"\"\n return build(cfg, LOCALIZERS)\n\n\ndef build_model(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build model.\"\"\"\n args = cfg.copy()\n obj_type = args.pop('type')\n if obj_type in LOCALIZERS:\n return build_localizer(cfg)\n if obj_type in RECOGNIZERS:\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n\n\ndef build_neck(cfg):\n \"\"\"Build neck.\"\"\"\n return build(cfg, NECKS)\n", "path": "mmaction/models/builder.py"}], "after_files": [{"content": "import torch.nn as nn\nfrom mmcv.utils import Registry, build_from_cfg\n\nfrom mmaction.utils import import_module_error_func\nfrom .registry import BACKBONES, HEADS, LOCALIZERS, LOSSES, NECKS, RECOGNIZERS\n\ntry:\n from mmdet.models.builder import DETECTORS, build_detector\nexcept (ImportError, ModuleNotFoundError):\n # Define an empty registry and building func, so that can import\n DETECTORS = Registry('detector')\n\n @import_module_error_func('mmdet')\n def 
build_detector(cfg, train_cfg, test_cfg):\n pass\n\n\ndef build(cfg, registry, default_args=None):\n \"\"\"Build a module.\n\n Args:\n cfg (dict, list[dict]): The config of modules, it is either a dict\n or a list of configs.\n registry (:obj:`Registry`): A registry the module belongs to.\n default_args (dict, optional): Default arguments to build the module.\n Defaults to None.\n\n Returns:\n nn.Module: A built nn module.\n \"\"\"\n\n if isinstance(cfg, list):\n modules = [\n build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg\n ]\n return nn.Sequential(*modules)\n\n return build_from_cfg(cfg, registry, default_args)\n\n\ndef build_backbone(cfg):\n \"\"\"Build backbone.\"\"\"\n return build(cfg, BACKBONES)\n\n\ndef build_head(cfg):\n \"\"\"Build head.\"\"\"\n return build(cfg, HEADS)\n\n\ndef build_recognizer(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build recognizer.\"\"\"\n return build(cfg, RECOGNIZERS,\n dict(train_cfg=train_cfg, test_cfg=test_cfg))\n\n\ndef build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return build(cfg, LOSSES)\n\n\ndef build_localizer(cfg):\n \"\"\"Build localizer.\"\"\"\n return build(cfg, LOCALIZERS)\n\n\ndef build_model(cfg, train_cfg=None, test_cfg=None):\n \"\"\"Build model.\"\"\"\n args = cfg.copy()\n obj_type = args.pop('type')\n if obj_type in LOCALIZERS:\n return build_localizer(cfg)\n if obj_type in RECOGNIZERS:\n return build_recognizer(cfg, train_cfg, test_cfg)\n if obj_type in DETECTORS:\n return build_detector(cfg, train_cfg, test_cfg)\n model_in_mmdet = ['FastRCNN']\n if obj_type in model_in_mmdet:\n raise ImportError(\n 'Please install mmdet for spatial temporal detection tasks.')\n raise ValueError(f'{obj_type} is not registered in '\n 'LOCALIZERS, RECOGNIZERS or DETECTORS')\n\n\ndef build_neck(cfg):\n \"\"\"Build neck.\"\"\"\n return build(cfg, NECKS)\n", "path": "mmaction/models/builder.py"}]} | 1,050 | 157 |
gh_patches_debug_21034 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing package named `packaging`
### Describe the bug
#3267 introduced a new python dependency named `packaging` (https://pypi.org/project/packaging/). We need to add it to the `install_requires` list in `core/setup.py`.
### Steps To Reproduce
This issue only affects 0.20.0b1 at the moment. Install dbt v0.20.0b1 in a bare virtualenv and run a command that imports the missing `packaging` module (`dbt --version`):
```bash
$ dbt --version
Traceback (most recent call last):
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/bin/dbt", line 5, in <module>
from dbt.main import main
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/main.py", line 17, in <module>
import dbt.task.deps as deps_task
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/task/deps.py", line 9, in <module>
from dbt.deps.resolver import resolve_packages
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/resolver.py", line 11, in <module>
from dbt.deps.git import GitUnpinnedPackage
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/git.py", line 5, in <module>
from dbt.clients import git, system
File "/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/clients/git.py", line 7, in <module>
from packaging import version
ModuleNotFoundError: No module named 'packaging'
```
### Additional context
This wasn't caught during testing since `packaging` is installed because it is an upstream dependency of our testing tools (`tox` and `pytest`). Our current testing setup is not configured to catch this.
--- END ISSUE ---
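For context, a minimal sketch of the import that breaks, assuming it mirrors the last frame of the traceback above; the version comparison is just an example of what `dbt.clients.git` uses `packaging` for:
```python
# Minimal sketch of the failing import chain. dbt.clients.git performs this import
# at module load time, so a virtualenv without `packaging` fails before any command runs.
from packaging import version  # ModuleNotFoundError when packaging is absent

print(version.parse("0.20.0b1") < version.parse("0.20.0"))  # True: b1 is a pre-release
```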
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "0.20.0b1"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="Fishtown Analytics",
38 author_email="[email protected]",
39 url="https://github.com/fishtown-analytics/dbt",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 package_data={
42 'dbt': [
43 'include/index.html',
44 'include/global_project/dbt_project.yml',
45 'include/global_project/docs/*.md',
46 'include/global_project/macros/*.sql',
47 'include/global_project/macros/**/*.sql',
48 'include/global_project/macros/**/**/*.sql',
49 'py.typed',
50 ]
51 },
52 test_suite='test',
53 entry_points={
54 'console_scripts': [
55 'dbt = dbt.main:main',
56 ],
57 },
58 scripts=[
59 'scripts/dbt',
60 ],
61 install_requires=[
62 'Jinja2==2.11.2',
63 'PyYAML>=3.11',
64 'sqlparse>=0.2.3,<0.4',
65 'networkx>=2.3,<3',
66 'minimal-snowplow-tracker==0.0.2',
67 'colorama>=0.3.9,<0.4.5',
68 'agate>=1.6,<1.6.2',
69 'isodate>=0.6,<0.7',
70 'json-rpc>=1.12,<2',
71 'werkzeug>=0.15,<2.0',
72 'dataclasses>=0.6,<0.9;python_version<"3.7"',
73 'hologram==0.0.14',
74 'logbook>=1.5,<1.6',
75 'mashumaro==2.0',
76 'typing-extensions>=3.7.4,<3.8',
77 # the following are all to match snowflake-connector-python
78 'requests<3.0.0',
79 'idna>=2.5,<3',
80 'cffi>=1.9,<2.0.0',
81 ],
82 zip_safe=False,
83 classifiers=[
84 'Development Status :: 5 - Production/Stable',
85
86 'License :: OSI Approved :: Apache Software License',
87
88 'Operating System :: Microsoft :: Windows',
89 'Operating System :: MacOS :: MacOS X',
90 'Operating System :: POSIX :: Linux',
91
92 'Programming Language :: Python :: 3.6',
93 'Programming Language :: Python :: 3.7',
94 'Programming Language :: Python :: 3.8',
95 'Programming Language :: Python :: 3.9',
96 ],
97 python_requires=">=3.6.3",
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -61,19 +61,20 @@
install_requires=[
'Jinja2==2.11.2',
'PyYAML>=3.11',
- 'sqlparse>=0.2.3,<0.4',
- 'networkx>=2.3,<3',
- 'minimal-snowplow-tracker==0.0.2',
- 'colorama>=0.3.9,<0.4.5',
'agate>=1.6,<1.6.2',
- 'isodate>=0.6,<0.7',
- 'json-rpc>=1.12,<2',
- 'werkzeug>=0.15,<2.0',
+ 'colorama>=0.3.9,<0.4.5',
'dataclasses>=0.6,<0.9;python_version<"3.7"',
'hologram==0.0.14',
+ 'isodate>=0.6,<0.7',
+ 'json-rpc>=1.12,<2',
'logbook>=1.5,<1.6',
'mashumaro==2.0',
+ 'minimal-snowplow-tracker==0.0.2',
+ 'networkx>=2.3,<3',
+ 'packaging~=20.9',
+ 'sqlparse>=0.2.3,<0.4',
'typing-extensions>=3.7.4,<3.8',
+ 'werkzeug>=0.15,<2.0',
# the following are all to match snowflake-connector-python
'requests<3.0.0',
'idna>=2.5,<3',
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -61,19 +61,20 @@\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n- 'sqlparse>=0.2.3,<0.4',\n- 'networkx>=2.3,<3',\n- 'minimal-snowplow-tracker==0.0.2',\n- 'colorama>=0.3.9,<0.4.5',\n 'agate>=1.6,<1.6.2',\n- 'isodate>=0.6,<0.7',\n- 'json-rpc>=1.12,<2',\n- 'werkzeug>=0.15,<2.0',\n+ 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n+ 'isodate>=0.6,<0.7',\n+ 'json-rpc>=1.12,<2',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n+ 'minimal-snowplow-tracker==0.0.2',\n+ 'networkx>=2.3,<3',\n+ 'packaging~=20.9',\n+ 'sqlparse>=0.2.3,<0.4',\n 'typing-extensions>=3.7.4,<3.8',\n+ 'werkzeug>=0.15,<2.0',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n", "issue": "Missing package named `packaging`\n### Describe the bug\r\n#3267 introduced a new python dependency named `packaging` (https://pypi.org/project/packaging/). We need to add it to the `install_requires` list in `core/setup.py`.\r\n\r\n### Steps To Reproduce\r\nThis issue only affects 0.20.0b1 at the moment. Install dbt v0.20.0b1 in a bare virtualenv and run a command that utilizes the missing packaging (`dbt --version`)\r\n\r\n```bash\r\n$ dbt --version\r\nTraceback (most recent call last):\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/bin/dbt\", line 5, in <module>\r\n from dbt.main import main\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/main.py\", line 17, in <module>\r\n import dbt.task.deps as deps_task\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/task/deps.py\", line 9, in <module>\r\n from dbt.deps.resolver import resolve_packages\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/resolver.py\", line 11, in <module>\r\n from dbt.deps.git import GitUnpinnedPackage\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/deps/git.py\", line 5, in <module>\r\n from dbt.clients import git, system\r\n File \"/Users/kwigley/.virtualenvs/tempenv-0d661869220df/lib/python3.9/site-packages/dbt/clients/git.py\", line 7, in <module>\r\n from packaging import version\r\nModuleNotFoundError: No module named 'packaging'\r\n```\r\n\r\n### Additional context\r\nThis wasn't caught during testing since `packaging` is installed because it is an upstream dependency of our testing tools (`tox` and `pytest`). Our current testing setup is not configured to catch this. 
\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.20.0b1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.5',\n 'agate>=1.6,<1.6.2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.20.0b1\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more 
effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'agate>=1.6,<1.6.2',\n 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.0',\n 'minimal-snowplow-tracker==0.0.2',\n 'networkx>=2.3,<3',\n 'packaging~=20.9',\n 'sqlparse>=0.2.3,<0.4',\n 'typing-extensions>=3.7.4,<3.8',\n 'werkzeug>=0.15,<2.0',\n # the following are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<3',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]} | 1,789 | 411 |
gh_patches_debug_12281 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It is difficult to get information from the request/response/detail tabs if the URL is very long
##### Steps to reproduce the problem:
1. Run mitmproxy.
2. Make a request with pathoc: `pathoc -c example.com:80 localhost:8080 'get:/SOMETHING VERY LONG'`
3. Enter the corresponding flow.
You won't be able to see the request/response/detail tabs of the flow without changing the size of your window.

But if we make a request with a URL that is, for example, over 10k characters long, even resizing the window won't let you see the request/response/detail tabs.
##### Any other comments? What have you tried so far?
About the maximum length of a URL:
https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
##### System information
Mitmproxy: 3.0.0.dev64 (commit 6dd336f)
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/flowview.py`
Content:
```
1 import math
2 import sys
3 from functools import lru_cache
4 from typing import Optional, Union # noqa
5
6 import urwid
7
8 from mitmproxy import contentviews
9 from mitmproxy import http
10 from mitmproxy.tools.console import common
11 from mitmproxy.tools.console import layoutwidget
12 from mitmproxy.tools.console import flowdetailview
13 from mitmproxy.tools.console import searchable
14 from mitmproxy.tools.console import tabs
15 import mitmproxy.tools.console.master # noqa
16 from mitmproxy.utils import strutils
17
18
19 class SearchError(Exception):
20 pass
21
22
23 class FlowViewHeader(urwid.WidgetWrap):
24
25 def __init__(
26 self,
27 master: "mitmproxy.tools.console.master.ConsoleMaster",
28 ) -> None:
29 self.master = master
30 self.focus_changed()
31
32 def focus_changed(self):
33 if self.master.view.focus.flow:
34 self._w = common.format_flow(
35 self.master.view.focus.flow,
36 False,
37 extended=True,
38 hostheader=self.master.options.showhost
39 )
40 else:
41 self._w = urwid.Pile([])
42
43
44 class FlowDetails(tabs.Tabs):
45 def __init__(self, master):
46 self.master = master
47 super().__init__([])
48 self.show()
49 self.last_displayed_body = None
50
51 def focus_changed(self):
52 if self.master.view.focus.flow:
53 self.tabs = [
54 (self.tab_request, self.view_request),
55 (self.tab_response, self.view_response),
56 (self.tab_details, self.view_details),
57 ]
58 self.show()
59 else:
60 self.master.window.pop()
61
62 @property
63 def view(self):
64 return self.master.view
65
66 @property
67 def flow(self):
68 return self.master.view.focus.flow
69
70 def tab_request(self):
71 if self.flow.intercepted and not self.flow.response:
72 return "Request intercepted"
73 else:
74 return "Request"
75
76 def tab_response(self):
77 if self.flow.intercepted and self.flow.response:
78 return "Response intercepted"
79 else:
80 return "Response"
81
82 def tab_details(self):
83 return "Detail"
84
85 def view_request(self):
86 return self.conn_text(self.flow.request)
87
88 def view_response(self):
89 return self.conn_text(self.flow.response)
90
91 def view_details(self):
92 return flowdetailview.flowdetails(self.view, self.flow)
93
94 def content_view(self, viewmode, message):
95 if message.raw_content is None:
96 msg, body = "", [urwid.Text([("error", "[content missing]")])]
97 return msg, body
98 else:
99 full = self.master.commands.call("view.getval @focus fullcontents false")
100 if full == "true":
101 limit = sys.maxsize
102 else:
103 limit = contentviews.VIEW_CUTOFF
104
105 flow_modify_cache_invalidation = hash((
106 message.raw_content,
107 message.headers.fields,
108 getattr(message, "path", None),
109 ))
110 # we need to pass the message off-band because it's not hashable
111 self._get_content_view_message = message
112 return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation)
113
114 @lru_cache(maxsize=200)
115 def _get_content_view(self, viewmode, max_lines, _):
116 message = self._get_content_view_message
117 self._get_content_view_message = None
118 description, lines, error = contentviews.get_message_content_view(
119 viewmode, message
120 )
121 if error:
122 self.master.add_log(error, "debug")
123 # Give hint that you have to tab for the response.
124 if description == "No content" and isinstance(message, http.HTTPRequest):
125 description = "No request content (press tab to view response)"
126
127 # If the users has a wide terminal, he gets fewer lines; this should not be an issue.
128 chars_per_line = 80
129 max_chars = max_lines * chars_per_line
130 total_chars = 0
131 text_objects = []
132 for line in lines:
133 txt = []
134 for (style, text) in line:
135 if total_chars + len(text) > max_chars:
136 text = text[:max_chars - total_chars]
137 txt.append((style, text))
138 total_chars += len(text)
139 if total_chars == max_chars:
140 break
141
142 # round up to the next line.
143 total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)
144
145 text_objects.append(urwid.Text(txt))
146 if total_chars == max_chars:
147 text_objects.append(urwid.Text([
148 ("highlight", "Stopped displaying data after %d lines. Press " % max_lines),
149 ("key", "f"),
150 ("highlight", " to load all data.")
151 ]))
152 break
153
154 return description, text_objects
155
156 def conn_text(self, conn):
157 if conn:
158 hdrs = []
159 for k, v in conn.headers.fields:
160 # This will always force an ascii representation of headers. For example, if the server sends a
161 #
162 # X-Authors: Made with ❤ in Hamburg
163 #
164 # header, mitmproxy will display the following:
165 #
166 # X-Authors: Made with \xe2\x9d\xa4 in Hamburg.
167 #
168 # The alternative would be to just use the header's UTF-8 representation and maybe
169 # do `str.replace("\t", "\\t")` to exempt tabs from urwid's special characters escaping [1].
170 # That would in some terminals allow rendering UTF-8 characters, but the mapping
171 # wouldn't be bijective, i.e. a user couldn't distinguish "\\t" and "\t".
172 # Also, from a security perspective, a mitmproxy user couldn't be fooled by homoglyphs.
173 #
174 # 1) https://github.com/mitmproxy/mitmproxy/issues/1833
175 # https://github.com/urwid/urwid/blob/6608ee2c9932d264abd1171468d833b7a4082e13/urwid/display_common.py#L35-L36,
176
177 k = strutils.bytes_to_escaped_str(k) + ":"
178 v = strutils.bytes_to_escaped_str(v)
179 hdrs.append((k, v))
180 txt = common.format_keyvals(
181 hdrs,
182 key_format="header"
183 )
184 viewmode = self.master.commands.call("console.flowview.mode")
185 msg, body = self.content_view(viewmode, conn)
186
187 cols = [
188 urwid.Text(
189 [
190 ("heading", msg),
191 ]
192 ),
193 urwid.Text(
194 [
195 " ",
196 ('heading', "["),
197 ('heading_key', "m"),
198 ('heading', (":%s]" % viewmode)),
199 ],
200 align="right"
201 )
202 ]
203 title = urwid.AttrWrap(urwid.Columns(cols), "heading")
204
205 txt.append(title)
206 txt.extend(body)
207 else:
208 txt = [
209 urwid.Text(""),
210 urwid.Text(
211 [
212 ("highlight", "No response. Press "),
213 ("key", "e"),
214 ("highlight", " and edit any aspect to add one."),
215 ]
216 )
217 ]
218 return searchable.Searchable(txt)
219
220
221 class FlowView(urwid.Frame, layoutwidget.LayoutWidget):
222 keyctx = "flowview"
223 title = "Flow Details"
224
225 def __init__(self, master):
226 super().__init__(
227 FlowDetails(master),
228 header = FlowViewHeader(master),
229 )
230 self.master = master
231
232 def focus_changed(self, *args, **kwargs):
233 self.body.focus_changed()
234 self.header.focus_changed()
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/tools/console/flowview.py b/mitmproxy/tools/console/flowview.py
--- a/mitmproxy/tools/console/flowview.py
+++ b/mitmproxy/tools/console/flowview.py
@@ -30,12 +30,14 @@
self.focus_changed()
def focus_changed(self):
+ cols, _ = self.master.ui.get_cols_rows()
if self.master.view.focus.flow:
self._w = common.format_flow(
self.master.view.focus.flow,
False,
extended=True,
- hostheader=self.master.options.showhost
+ hostheader=self.master.options.showhost,
+ max_url_len=cols,
)
else:
self._w = urwid.Pile([])
| {"golden_diff": "diff --git a/mitmproxy/tools/console/flowview.py b/mitmproxy/tools/console/flowview.py\n--- a/mitmproxy/tools/console/flowview.py\n+++ b/mitmproxy/tools/console/flowview.py\n@@ -30,12 +30,14 @@\n self.focus_changed()\n \n def focus_changed(self):\n+ cols, _ = self.master.ui.get_cols_rows()\n if self.master.view.focus.flow:\n self._w = common.format_flow(\n self.master.view.focus.flow,\n False,\n extended=True,\n- hostheader=self.master.options.showhost\n+ hostheader=self.master.options.showhost,\n+ max_url_len=cols,\n )\n else:\n self._w = urwid.Pile([])\n", "issue": "It is difficult to get information from request/response/detail tab, if url is very-very long\n##### Steps to reproduce the problem:\r\n\r\n1. Run mitmproxy.\r\n2. Make a request with pathoc: `pathoc -c example.com:80 localhost:8080 'get:/SOMETHING VERY LONG'`\r\n3. Enter the corresponding flow.\r\nYou won't be able to see request/response/detail of the flow without changing size of your window. \r\n\r\n\r\nBut if we decide to make a request with url over for example 10k length, even changing the size of window won't help you to see request/response/detail tab.\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nAbout maximum length of a URL\r\nhttps://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev64 (commit 6dd336f) \r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-112-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\n", "before_files": [{"content": "import math\nimport sys\nfrom functools import lru_cache\nfrom typing import Optional, Union # noqa\n\nimport urwid\n\nfrom mitmproxy import contentviews\nfrom mitmproxy import http\nfrom mitmproxy.tools.console import common\nfrom mitmproxy.tools.console import layoutwidget\nfrom mitmproxy.tools.console import flowdetailview\nfrom mitmproxy.tools.console import searchable\nfrom mitmproxy.tools.console import tabs\nimport mitmproxy.tools.console.master # noqa\nfrom mitmproxy.utils import strutils\n\n\nclass SearchError(Exception):\n pass\n\n\nclass FlowViewHeader(urwid.WidgetWrap):\n\n def __init__(\n self,\n master: \"mitmproxy.tools.console.master.ConsoleMaster\",\n ) -> None:\n self.master = master\n self.focus_changed()\n\n def focus_changed(self):\n if self.master.view.focus.flow:\n self._w = common.format_flow(\n self.master.view.focus.flow,\n False,\n extended=True,\n hostheader=self.master.options.showhost\n )\n else:\n self._w = urwid.Pile([])\n\n\nclass FlowDetails(tabs.Tabs):\n def __init__(self, master):\n self.master = master\n super().__init__([])\n self.show()\n self.last_displayed_body = None\n\n def focus_changed(self):\n if self.master.view.focus.flow:\n self.tabs = [\n (self.tab_request, self.view_request),\n (self.tab_response, self.view_response),\n (self.tab_details, self.view_details),\n ]\n self.show()\n else:\n self.master.window.pop()\n\n @property\n def view(self):\n return self.master.view\n\n @property\n def flow(self):\n return self.master.view.focus.flow\n\n def tab_request(self):\n if self.flow.intercepted and not self.flow.response:\n return \"Request intercepted\"\n else:\n return \"Request\"\n\n def tab_response(self):\n if self.flow.intercepted and self.flow.response:\n return \"Response intercepted\"\n else:\n return \"Response\"\n\n def tab_details(self):\n return \"Detail\"\n\n def view_request(self):\n return self.conn_text(self.flow.request)\n\n def 
view_response(self):\n return self.conn_text(self.flow.response)\n\n def view_details(self):\n return flowdetailview.flowdetails(self.view, self.flow)\n\n def content_view(self, viewmode, message):\n if message.raw_content is None:\n msg, body = \"\", [urwid.Text([(\"error\", \"[content missing]\")])]\n return msg, body\n else:\n full = self.master.commands.call(\"view.getval @focus fullcontents false\")\n if full == \"true\":\n limit = sys.maxsize\n else:\n limit = contentviews.VIEW_CUTOFF\n\n flow_modify_cache_invalidation = hash((\n message.raw_content,\n message.headers.fields,\n getattr(message, \"path\", None),\n ))\n # we need to pass the message off-band because it's not hashable\n self._get_content_view_message = message\n return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation)\n\n @lru_cache(maxsize=200)\n def _get_content_view(self, viewmode, max_lines, _):\n message = self._get_content_view_message\n self._get_content_view_message = None\n description, lines, error = contentviews.get_message_content_view(\n viewmode, message\n )\n if error:\n self.master.add_log(error, \"debug\")\n # Give hint that you have to tab for the response.\n if description == \"No content\" and isinstance(message, http.HTTPRequest):\n description = \"No request content (press tab to view response)\"\n\n # If the users has a wide terminal, he gets fewer lines; this should not be an issue.\n chars_per_line = 80\n max_chars = max_lines * chars_per_line\n total_chars = 0\n text_objects = []\n for line in lines:\n txt = []\n for (style, text) in line:\n if total_chars + len(text) > max_chars:\n text = text[:max_chars - total_chars]\n txt.append((style, text))\n total_chars += len(text)\n if total_chars == max_chars:\n break\n\n # round up to the next line.\n total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)\n\n text_objects.append(urwid.Text(txt))\n if total_chars == max_chars:\n text_objects.append(urwid.Text([\n (\"highlight\", \"Stopped displaying data after %d lines. Press \" % max_lines),\n (\"key\", \"f\"),\n (\"highlight\", \" to load all data.\")\n ]))\n break\n\n return description, text_objects\n\n def conn_text(self, conn):\n if conn:\n hdrs = []\n for k, v in conn.headers.fields:\n # This will always force an ascii representation of headers. For example, if the server sends a\n #\n # X-Authors: Made with \u2764 in Hamburg\n #\n # header, mitmproxy will display the following:\n #\n # X-Authors: Made with \\xe2\\x9d\\xa4 in Hamburg.\n #\n # The alternative would be to just use the header's UTF-8 representation and maybe\n # do `str.replace(\"\\t\", \"\\\\t\")` to exempt tabs from urwid's special characters escaping [1].\n # That would in some terminals allow rendering UTF-8 characters, but the mapping\n # wouldn't be bijective, i.e. 
a user couldn't distinguish \"\\\\t\" and \"\\t\".\n # Also, from a security perspective, a mitmproxy user couldn't be fooled by homoglyphs.\n #\n # 1) https://github.com/mitmproxy/mitmproxy/issues/1833\n # https://github.com/urwid/urwid/blob/6608ee2c9932d264abd1171468d833b7a4082e13/urwid/display_common.py#L35-L36,\n\n k = strutils.bytes_to_escaped_str(k) + \":\"\n v = strutils.bytes_to_escaped_str(v)\n hdrs.append((k, v))\n txt = common.format_keyvals(\n hdrs,\n key_format=\"header\"\n )\n viewmode = self.master.commands.call(\"console.flowview.mode\")\n msg, body = self.content_view(viewmode, conn)\n\n cols = [\n urwid.Text(\n [\n (\"heading\", msg),\n ]\n ),\n urwid.Text(\n [\n \" \",\n ('heading', \"[\"),\n ('heading_key', \"m\"),\n ('heading', (\":%s]\" % viewmode)),\n ],\n align=\"right\"\n )\n ]\n title = urwid.AttrWrap(urwid.Columns(cols), \"heading\")\n\n txt.append(title)\n txt.extend(body)\n else:\n txt = [\n urwid.Text(\"\"),\n urwid.Text(\n [\n (\"highlight\", \"No response. Press \"),\n (\"key\", \"e\"),\n (\"highlight\", \" and edit any aspect to add one.\"),\n ]\n )\n ]\n return searchable.Searchable(txt)\n\n\nclass FlowView(urwid.Frame, layoutwidget.LayoutWidget):\n keyctx = \"flowview\"\n title = \"Flow Details\"\n\n def __init__(self, master):\n super().__init__(\n FlowDetails(master),\n header = FlowViewHeader(master),\n )\n self.master = master\n\n def focus_changed(self, *args, **kwargs):\n self.body.focus_changed()\n self.header.focus_changed()\n", "path": "mitmproxy/tools/console/flowview.py"}], "after_files": [{"content": "import math\nimport sys\nfrom functools import lru_cache\nfrom typing import Optional, Union # noqa\n\nimport urwid\n\nfrom mitmproxy import contentviews\nfrom mitmproxy import http\nfrom mitmproxy.tools.console import common\nfrom mitmproxy.tools.console import layoutwidget\nfrom mitmproxy.tools.console import flowdetailview\nfrom mitmproxy.tools.console import searchable\nfrom mitmproxy.tools.console import tabs\nimport mitmproxy.tools.console.master # noqa\nfrom mitmproxy.utils import strutils\n\n\nclass SearchError(Exception):\n pass\n\n\nclass FlowViewHeader(urwid.WidgetWrap):\n\n def __init__(\n self,\n master: \"mitmproxy.tools.console.master.ConsoleMaster\",\n ) -> None:\n self.master = master\n self.focus_changed()\n\n def focus_changed(self):\n cols, _ = self.master.ui.get_cols_rows()\n if self.master.view.focus.flow:\n self._w = common.format_flow(\n self.master.view.focus.flow,\n False,\n extended=True,\n hostheader=self.master.options.showhost,\n max_url_len=cols,\n )\n else:\n self._w = urwid.Pile([])\n\n\nclass FlowDetails(tabs.Tabs):\n def __init__(self, master):\n self.master = master\n super().__init__([])\n self.show()\n self.last_displayed_body = None\n\n def focus_changed(self):\n if self.master.view.focus.flow:\n self.tabs = [\n (self.tab_request, self.view_request),\n (self.tab_response, self.view_response),\n (self.tab_details, self.view_details),\n ]\n self.show()\n else:\n self.master.window.pop()\n\n @property\n def view(self):\n return self.master.view\n\n @property\n def flow(self):\n return self.master.view.focus.flow\n\n def tab_request(self):\n if self.flow.intercepted and not self.flow.response:\n return \"Request intercepted\"\n else:\n return \"Request\"\n\n def tab_response(self):\n if self.flow.intercepted and self.flow.response:\n return \"Response intercepted\"\n else:\n return \"Response\"\n\n def tab_details(self):\n return \"Detail\"\n\n def view_request(self):\n return 
self.conn_text(self.flow.request)\n\n def view_response(self):\n return self.conn_text(self.flow.response)\n\n def view_details(self):\n return flowdetailview.flowdetails(self.view, self.flow)\n\n def content_view(self, viewmode, message):\n if message.raw_content is None:\n msg, body = \"\", [urwid.Text([(\"error\", \"[content missing]\")])]\n return msg, body\n else:\n full = self.master.commands.call(\"view.getval @focus fullcontents false\")\n if full == \"true\":\n limit = sys.maxsize\n else:\n limit = contentviews.VIEW_CUTOFF\n\n flow_modify_cache_invalidation = hash((\n message.raw_content,\n message.headers.fields,\n getattr(message, \"path\", None),\n ))\n # we need to pass the message off-band because it's not hashable\n self._get_content_view_message = message\n return self._get_content_view(viewmode, limit, flow_modify_cache_invalidation)\n\n @lru_cache(maxsize=200)\n def _get_content_view(self, viewmode, max_lines, _):\n message = self._get_content_view_message\n self._get_content_view_message = None\n description, lines, error = contentviews.get_message_content_view(\n viewmode, message\n )\n if error:\n self.master.add_log(error, \"debug\")\n # Give hint that you have to tab for the response.\n if description == \"No content\" and isinstance(message, http.HTTPRequest):\n description = \"No request content (press tab to view response)\"\n\n # If the users has a wide terminal, he gets fewer lines; this should not be an issue.\n chars_per_line = 80\n max_chars = max_lines * chars_per_line\n total_chars = 0\n text_objects = []\n for line in lines:\n txt = []\n for (style, text) in line:\n if total_chars + len(text) > max_chars:\n text = text[:max_chars - total_chars]\n txt.append((style, text))\n total_chars += len(text)\n if total_chars == max_chars:\n break\n\n # round up to the next line.\n total_chars = int(math.ceil(total_chars / chars_per_line) * chars_per_line)\n\n text_objects.append(urwid.Text(txt))\n if total_chars == max_chars:\n text_objects.append(urwid.Text([\n (\"highlight\", \"Stopped displaying data after %d lines. Press \" % max_lines),\n (\"key\", \"f\"),\n (\"highlight\", \" to load all data.\")\n ]))\n break\n\n return description, text_objects\n\n def conn_text(self, conn):\n if conn:\n hdrs = []\n for k, v in conn.headers.fields:\n # This will always force an ascii representation of headers. For example, if the server sends a\n #\n # X-Authors: Made with \u2764 in Hamburg\n #\n # header, mitmproxy will display the following:\n #\n # X-Authors: Made with \\xe2\\x9d\\xa4 in Hamburg.\n #\n # The alternative would be to just use the header's UTF-8 representation and maybe\n # do `str.replace(\"\\t\", \"\\\\t\")` to exempt tabs from urwid's special characters escaping [1].\n # That would in some terminals allow rendering UTF-8 characters, but the mapping\n # wouldn't be bijective, i.e. 
a user couldn't distinguish \"\\\\t\" and \"\\t\".\n # Also, from a security perspective, a mitmproxy user couldn't be fooled by homoglyphs.\n #\n # 1) https://github.com/mitmproxy/mitmproxy/issues/1833\n # https://github.com/urwid/urwid/blob/6608ee2c9932d264abd1171468d833b7a4082e13/urwid/display_common.py#L35-L36,\n\n k = strutils.bytes_to_escaped_str(k) + \":\"\n v = strutils.bytes_to_escaped_str(v)\n hdrs.append((k, v))\n txt = common.format_keyvals(\n hdrs,\n key_format=\"header\"\n )\n viewmode = self.master.commands.call(\"console.flowview.mode\")\n msg, body = self.content_view(viewmode, conn)\n\n cols = [\n urwid.Text(\n [\n (\"heading\", msg),\n ]\n ),\n urwid.Text(\n [\n \" \",\n ('heading', \"[\"),\n ('heading_key', \"m\"),\n ('heading', (\":%s]\" % viewmode)),\n ],\n align=\"right\"\n )\n ]\n title = urwid.AttrWrap(urwid.Columns(cols), \"heading\")\n\n txt.append(title)\n txt.extend(body)\n else:\n txt = [\n urwid.Text(\"\"),\n urwid.Text(\n [\n (\"highlight\", \"No response. Press \"),\n (\"key\", \"e\"),\n (\"highlight\", \" and edit any aspect to add one.\"),\n ]\n )\n ]\n return searchable.Searchable(txt)\n\n\nclass FlowView(urwid.Frame, layoutwidget.LayoutWidget):\n keyctx = \"flowview\"\n title = \"Flow Details\"\n\n def __init__(self, master):\n super().__init__(\n FlowDetails(master),\n header = FlowViewHeader(master),\n )\n self.master = master\n\n def focus_changed(self, *args, **kwargs):\n self.body.focus_changed()\n self.header.focus_changed()\n", "path": "mitmproxy/tools/console/flowview.py"}]} | 2,872 | 161 |
gh_patches_debug_415 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6492 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Initial messages containing non-ascii characters fail if codename filtering is enabled.
## Description
Codename filtering was introduced in 2.3.0, allowing admins to block initial submissions containing only the user's codename, as codenames should not be shared with journalists. The filter uses the `compare_digest()` function to ensure a constant-time comparison, but this function throws a `TypeError` if either of the strings being compared contains non-ASCII characters.
## Steps to Reproduce
- start up `make dev` on 2.4.0
- visit the JI and enable codename filtering under Admin > Instance Config
- visit the SI, create a new source, and submit an initial message containing non-ASCII characters, e.g. `Hallo! ö, ü, ä, or ß`
## Expected Behavior
- Message is submitted
## Actual Behavior
- 500 error, and (in dev) stack trace due to TypeError
## Comments
Suggestions to fix, any other relevant information.
--- END ISSUE ---
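A minimal sketch of the underlying failure (the codename below is made up; the behaviour shown is that of `hmac.compare_digest` itself, not SecureDrop code):
```python
from hmac import compare_digest

message = "Hallo! ö, ü, ä, or ß"
codename = "correct horse battery staple"  # invented codename for illustration

# compare_digest only accepts str arguments when both are ASCII-only, so this
# raises before any comparison is made:
try:
    compare_digest(message, codename)
except TypeError as exc:
    print(f"TypeError: {exc}")
```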
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/utils.py`
Content:
```
1 import json
2 import re
3 import subprocess
4 import typing
5 from hmac import compare_digest
6
7 import werkzeug
8 from flask import current_app, flash, redirect, render_template, url_for
9 from flask.sessions import SessionMixin
10 from flask_babel import gettext
11 from markupsafe import Markup, escape
12 from source_user import SourceUser
13 from store import Storage
14
15 if typing.TYPE_CHECKING:
16 from typing import Optional
17
18
19 def codename_detected(message: str, codename: str) -> bool:
20 """
21 Check for codenames in incoming messages. including case where user copy/pasted
22 from /generate or the codename widget on the same page
23 """
24 message = message.strip()
25
26 return compare_digest(message.strip(), codename)
27
28
29 def flash_msg(
30 category: str,
31 declarative: "Optional[str]",
32 *msg_contents: "str",
33 ) -> None:
34 """
35 Render flash message with a (currently) optional declarative heading.
36 """
37 contents = Markup("<br>".join([escape(part) for part in msg_contents]))
38
39 msg = render_template(
40 "flash_message.html",
41 declarative=declarative,
42 msg_contents=contents,
43 )
44 flash(Markup(msg), category)
45
46
47 def clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:
48 msg = render_template(
49 "flash_message.html",
50 declarative=gettext("Important"),
51 msg_contents=Markup(
52 gettext(
53 'You were logged out due to inactivity. Click the <img src={icon} alt="" '
54 'width="16" height="16"> <b>New Identity</b> button in your Tor Browser\'s '
55 "toolbar before moving on. This will clear your Tor Browser activity data on "
56 "this device."
57 ).format(icon=url_for("static", filename="i/torbroom.png"))
58 ),
59 )
60
61 # Clear the session after we render the message so it's localized
62 flask_session.clear()
63
64 flash(Markup(msg), "error")
65 return redirect(url_for("main.index"))
66
67
68 def normalize_timestamps(logged_in_source: SourceUser) -> None:
69 """
70 Update the timestamps on all of the source's submissions. This
71 minimizes metadata that could be useful to investigators. See
72 #301.
73 """
74 source_in_db = logged_in_source.get_db_record()
75 sub_paths = [
76 Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)
77 for submission in source_in_db.submissions
78 ]
79 if len(sub_paths) > 1:
80 args = ["touch", "--no-create"]
81 args.extend(sub_paths)
82 rc = subprocess.call(args)
83 if rc != 0:
84 current_app.logger.warning(
85 "Couldn't normalize submission " "timestamps (touch exited with %d)" % rc
86 )
87
88
89 def check_url_file(path: str, regexp: str) -> "Optional[str]":
90 """
91 Check that a file exists at the path given and contains a single line
92 matching the regexp. Used for checking the source interface address
93 files in /var/lib/securedrop (as the Apache user can't read Tor config)
94 """
95 try:
96 f = open(path, "r")
97 contents = f.readline().strip()
98 f.close()
99 if re.match(regexp, contents):
100 return contents
101 else:
102 return None
103 except IOError:
104 return None
105
106
107 def get_sourcev3_url() -> "Optional[str]":
108 return check_url_file("/var/lib/securedrop/source_v3_url", r"^[a-z0-9]{56}\.onion$")
109
110
111 def fit_codenames_into_cookie(codenames: dict) -> dict:
112 """
113 If `codenames` will approach `werkzeug.Response.max_cookie_size` once
114 serialized, incrementally pop off the oldest codename until the remaining
115 (newer) ones will fit.
116 """
117
118 serialized = json.dumps(codenames).encode()
119 if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093
120 if current_app:
121 current_app.logger.warn(
122 f"Popping oldest of {len(codenames)} "
123 f"codenames ({len(serialized)} bytes) to "
124 f"fit within maximum cookie size"
125 )
126 del codenames[list(codenames)[0]] # FIFO
127
128 return fit_codenames_into_cookie(codenames)
129
130 return codenames
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/utils.py b/securedrop/source_app/utils.py
--- a/securedrop/source_app/utils.py
+++ b/securedrop/source_app/utils.py
@@ -23,7 +23,7 @@
"""
message = message.strip()
- return compare_digest(message.strip(), codename)
+ return compare_digest(message.strip().encode("utf-8"), codename.encode("utf-8"))
def flash_msg(
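A quick sketch of how the patched check behaves; the function body mirrors the diff above and the sample inputs are invented:
```python
from hmac import compare_digest

def codename_detected(message: str, codename: str) -> bool:
    # Same comparison as the patched helper: bytes on both sides, constant time.
    return compare_digest(message.strip().encode("utf-8"), codename.encode("utf-8"))

print(codename_detected("  glossy unicorn  ", "glossy unicorn"))    # True -> blocked
print(codename_detected("Hallo! ö, ü, ä, or ß", "glossy unicorn"))  # False, no TypeError
```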
| {"golden_diff": "diff --git a/securedrop/source_app/utils.py b/securedrop/source_app/utils.py\n--- a/securedrop/source_app/utils.py\n+++ b/securedrop/source_app/utils.py\n@@ -23,7 +23,7 @@\n \"\"\"\n message = message.strip()\n \n- return compare_digest(message.strip(), codename)\n+ return compare_digest(message.strip().encode(\"utf-8\"), codename.encode(\"utf-8\"))\n \n \n def flash_msg(\n", "issue": "Initial messages containing non-ascii characters fail if codename filtering is enabled.\n## Description\r\n\r\nCodename filtering was introduced in 2.3.0, allowing admins to block initial submissions containing only the user's codename, as they should not be shared with journalists. The filter uses the `compare_digest()` function to ensure constant-time comparison, but this fn will throw a `TypeError` if any of the strings being compared contain Unicode.\r\n\r\n## Steps to Reproduce\r\n\r\n- start up `make dev` on 2.4.0\r\n- visit the JI and enable codename filtering under Admin > Instance Config\r\n- visit the SI, create a new source, and submit an initial message containing unicode, ie `Hallo! \u00f6, \u00fc, \u00e4, or \u00df`\r\n\r\n## Expected Behavior\r\n- Message is submitted\r\n\r\n## Actual Behavior\r\n- 500 error, and (in dev) stack trace due to TypeError\r\n\r\n## Comments\r\n\r\nSuggestions to fix, any other relevant information.\r\n\n", "before_files": [{"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip(), codename)\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. 
See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n", "path": "securedrop/source_app/utils.py"}], "after_files": [{"content": "import json\nimport re\nimport subprocess\nimport typing\nfrom hmac import compare_digest\n\nimport werkzeug\nfrom flask import current_app, flash, redirect, render_template, url_for\nfrom flask.sessions import SessionMixin\nfrom flask_babel import gettext\nfrom markupsafe import Markup, escape\nfrom source_user import SourceUser\nfrom store import Storage\n\nif typing.TYPE_CHECKING:\n from typing import Optional\n\n\ndef codename_detected(message: str, codename: str) -> bool:\n \"\"\"\n Check for codenames in incoming messages. including case where user copy/pasted\n from /generate or the codename widget on the same page\n \"\"\"\n message = message.strip()\n\n return compare_digest(message.strip().encode(\"utf-8\"), codename.encode(\"utf-8\"))\n\n\ndef flash_msg(\n category: str,\n declarative: \"Optional[str]\",\n *msg_contents: \"str\",\n) -> None:\n \"\"\"\n Render flash message with a (currently) optional declarative heading.\n \"\"\"\n contents = Markup(\"<br>\".join([escape(part) for part in msg_contents]))\n\n msg = render_template(\n \"flash_message.html\",\n declarative=declarative,\n msg_contents=contents,\n )\n flash(Markup(msg), category)\n\n\ndef clear_session_and_redirect_to_logged_out_page(flask_session: SessionMixin) -> werkzeug.Response:\n msg = render_template(\n \"flash_message.html\",\n declarative=gettext(\"Important\"),\n msg_contents=Markup(\n gettext(\n 'You were logged out due to inactivity. Click the <img src={icon} alt=\"\" '\n 'width=\"16\" height=\"16\"> <b>New Identity</b> button in your Tor Browser\\'s '\n \"toolbar before moving on. 
This will clear your Tor Browser activity data on \"\n \"this device.\"\n ).format(icon=url_for(\"static\", filename=\"i/torbroom.png\"))\n ),\n )\n\n # Clear the session after we render the message so it's localized\n flask_session.clear()\n\n flash(Markup(msg), \"error\")\n return redirect(url_for(\"main.index\"))\n\n\ndef normalize_timestamps(logged_in_source: SourceUser) -> None:\n \"\"\"\n Update the timestamps on all of the source's submissions. This\n minimizes metadata that could be useful to investigators. See\n #301.\n \"\"\"\n source_in_db = logged_in_source.get_db_record()\n sub_paths = [\n Storage.get_default().path(logged_in_source.filesystem_id, submission.filename)\n for submission in source_in_db.submissions\n ]\n if len(sub_paths) > 1:\n args = [\"touch\", \"--no-create\"]\n args.extend(sub_paths)\n rc = subprocess.call(args)\n if rc != 0:\n current_app.logger.warning(\n \"Couldn't normalize submission \" \"timestamps (touch exited with %d)\" % rc\n )\n\n\ndef check_url_file(path: str, regexp: str) -> \"Optional[str]\":\n \"\"\"\n Check that a file exists at the path given and contains a single line\n matching the regexp. Used for checking the source interface address\n files in /var/lib/securedrop (as the Apache user can't read Tor config)\n \"\"\"\n try:\n f = open(path, \"r\")\n contents = f.readline().strip()\n f.close()\n if re.match(regexp, contents):\n return contents\n else:\n return None\n except IOError:\n return None\n\n\ndef get_sourcev3_url() -> \"Optional[str]\":\n return check_url_file(\"/var/lib/securedrop/source_v3_url\", r\"^[a-z0-9]{56}\\.onion$\")\n\n\ndef fit_codenames_into_cookie(codenames: dict) -> dict:\n \"\"\"\n If `codenames` will approach `werkzeug.Response.max_cookie_size` once\n serialized, incrementally pop off the oldest codename until the remaining\n (newer) ones will fit.\n \"\"\"\n\n serialized = json.dumps(codenames).encode()\n if len(codenames) > 1 and len(serialized) > 4000: # werkzeug.Response.max_cookie_size = 4093\n if current_app:\n current_app.logger.warn(\n f\"Popping oldest of {len(codenames)} \"\n f\"codenames ({len(serialized)} bytes) to \"\n f\"fit within maximum cookie size\"\n )\n del codenames[list(codenames)[0]] # FIFO\n\n return fit_codenames_into_cookie(codenames)\n\n return codenames\n", "path": "securedrop/source_app/utils.py"}]} | 1,729 | 99 |
gh_patches_debug_6011 | rasdani/github-patches | git_diff | tensorflow__addons-687 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Time distributed WeightNormalization results in incompatible shape
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 18.04.4 LTS
- TensorFlow version and how it was installed (source or binary): tensorflow-gpu 2.0.0, binary
- TensorFlow-Addons version and how it was installed (source or binary): tensorflow-addons 0.6.0, binary
- Python version: Python 3.6.9
- Is GPU used? (yes/no): yes
**Describe the bug**
Time distributed WeightNormalization results in an incompatible shape error.
The error occurs in `input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)`:
- WeightNormalization ON: `self.input_spec (shape=[32, 64, 64, 3], ndim=4)`
- WeightNormalization OFF: `self.input_spec (ndim=4, axes={-1:3})`
**Code to reproduce the issue**
```python
import tensorflow as tf
from tensorflow_addons.layers.wrappers import WeightNormalization
weight_norm = True
batch_shape = (32, 16, 64, 64, 3)
inputs = tf.keras.layers.Input(batch_shape=batch_shape)
if weight_norm is True:
a = tf.keras.layers.Conv2D(3, 5)
b = WeightNormalization(a)
outputs = tf.keras.layers.TimeDistributed(b)(inputs)
else:
outputs = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(3, 5))(inputs)
model = tf.keras.Model(inputs, outputs)
```
**Other info / logs**
```
WARNING:tensorflow:From /opt/anaconda/lib/python3.6/site-packages/tensorflow_addons/layers/wrappers.py:84: Layer.add_variable (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.
Instructions for updating:
Please use `layer.add_weight` method instead.
Traceback (most recent call last):
File "/workarea/projects/magic/MagIC/applications/temporal/app_test.py", line 11, in <module>
outputs = tf.keras.layers.TimeDistributed(b)(inputs)
File "/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 842, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/wrappers.py", line 254, in call
y = self.layer(inputs, **kwargs)
File "/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 812, in __call__
self.name)
File "/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/input_spec.py", line 224, in assert_input_compatibility
', found shape=' + str(shape))
ValueError: Input 0 is incompatible with layer weight_normalization: expected shape=[32, 64, 64, 3], found shape=[512, 64, 64, 3]
```
--- END ISSUE ---
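For context, the mismatch reported above follows from how `TimeDistributed` calls its inner layer: it folds the time axis into the batch axis, so the wrapped layer sees 32 * 16 = 512 samples while the wrapper pinned the full static batch shape in its `input_spec`. A minimal standalone illustration of that reshaping (a sketch for clarity, not code from the repository):
```python
import tensorflow as tf

# TimeDistributed effectively flattens (batch, time, ...) to (batch * time, ...)
# before invoking the wrapped layer, which is why the inner layer reports an
# input of shape (512, 64, 64, 3) instead of (32, 64, 64, 3).
x = tf.zeros((32, 16, 64, 64, 3))
folded = tf.reshape(x, (-1, 64, 64, 3))
print(folded.shape)  # (512, 64, 64, 3)
```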
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/layers/wrappers.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # =============================================================================
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import tensorflow as tf
20
21
22 @tf.keras.utils.register_keras_serializable(package='Addons')
23 class WeightNormalization(tf.keras.layers.Wrapper):
24 """This wrapper reparameterizes a layer by decoupling the weight's
25 magnitude and direction.
26
27 This speeds up convergence by improving the
28 conditioning of the optimization problem.
29 Weight Normalization: A Simple Reparameterization to Accelerate
30 Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868
31 Tim Salimans, Diederik P. Kingma (2016)
32 WeightNormalization wrapper works for keras and tf layers.
33 ```python
34 net = WeightNormalization(
35 tf.keras.layers.Conv2D(2, 2, activation='relu'),
36 input_shape=(32, 32, 3),
37 data_init=True)(x)
38 net = WeightNormalization(
39 tf.keras.layers.Conv2D(16, 5, activation='relu'),
40 data_init=True)(net)
41 net = WeightNormalization(
42 tf.keras.layers.Dense(120, activation='relu'),
43 data_init=True)(net)
44 net = WeightNormalization(
45 tf.keras.layers.Dense(n_classes),
46 data_init=True)(net)
47 ```
48 Arguments:
49 layer: a layer instance.
50 data_init: If `True` use data dependent variable initialization
51 Raises:
52 ValueError: If not initialized with a `Layer` instance.
53 ValueError: If `Layer` does not contain a `kernel` of weights
54 NotImplementedError: If `data_init` is True and running graph execution
55 """
56
57 def __init__(self, layer, data_init=True, **kwargs):
58 super(WeightNormalization, self).__init__(layer, **kwargs)
59 self.data_init = data_init
60 self._track_trackable(layer, name='layer')
61
62 def build(self, input_shape):
63 """Build `Layer`"""
64 input_shape = tf.TensorShape(input_shape).as_list()
65 self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
66
67 if not self.layer.built:
68 self.layer.build(input_shape)
69
70 if not hasattr(self.layer, 'kernel'):
71 raise ValueError('`WeightNormalization` must wrap a layer that'
72 ' contains a `kernel` for weights')
73
74 # The kernel's filter or unit dimension is -1
75 self.layer_depth = int(self.layer.kernel.shape[-1])
76 self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))
77
78 self.g = self.add_weight(
79 name='g',
80 shape=(self.layer_depth,),
81 initializer='ones',
82 dtype=self.layer.kernel.dtype,
83 trainable=True)
84 self.v = self.layer.kernel
85
86 self._initialized = self.add_weight(
87 name='initialized',
88 shape=None,
89 initializer='zeros',
90 dtype=tf.dtypes.bool,
91 trainable=False)
92
93 if self.data_init:
94 # Used for data initialization in self._data_dep_init.
95 layer_config = tf.keras.layers.serialize(self.layer)
96 layer_config['config']['trainable'] = False
97 self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)
98 self._naked_clone_layer.build(input_shape)
99 self._naked_clone_layer.set_weights(self.layer.get_weights())
100 self._naked_clone_layer.activation = None
101
102 self.built = True
103
104 def call(self, inputs):
105 """Call `Layer`"""
106
107 def _do_nothing():
108 return tf.identity(self.g)
109
110 def _update_weights():
111 # Ensure we read `self.g` after _update_weights.
112 with tf.control_dependencies(self._initialize_weights(inputs)):
113 return tf.identity(self.g)
114
115 g = tf.cond(self._initialized, _do_nothing, _update_weights)
116
117 with tf.name_scope('compute_weights'):
118 # Replace kernel by normalized weight variable.
119 self.layer.kernel = tf.nn.l2_normalize(
120 self.v, axis=self.kernel_norm_axes) * g
121
122 # Ensure we calculate result after updating kernel.
123 update_kernel = tf.identity(self.layer.kernel)
124 with tf.control_dependencies([update_kernel]):
125 outputs = self.layer(inputs)
126 return outputs
127
128 def compute_output_shape(self, input_shape):
129 return tf.TensorShape(
130 self.layer.compute_output_shape(input_shape).as_list())
131
132 def _initialize_weights(self, inputs):
133 """Initialize weight g.
134
135 The initial value of g could either from the initial value in v,
136 or by the input value if self.data_init is True.
137 """
138 with tf.control_dependencies([
139 tf.debugging.assert_equal( # pylint: disable=bad-continuation
140 self._initialized,
141 False,
142 message='The layer has been initialized.')
143 ]):
144 if self.data_init:
145 assign_tensors = self._data_dep_init(inputs)
146 else:
147 assign_tensors = self._init_norm()
148 assign_tensors.append(self._initialized.assign(True))
149 return assign_tensors
150
151 def _init_norm(self):
152 """Set the weight g with the norm of the weight vector."""
153 with tf.name_scope('init_norm'):
154 v_flat = tf.reshape(self.v, [-1, self.layer_depth])
155 v_norm = tf.linalg.norm(v_flat, axis=0)
156 g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))
157 return [g_tensor]
158
159 def _data_dep_init(self, inputs):
160 """Data dependent initialization."""
161 with tf.name_scope('data_dep_init'):
162 # Generate data dependent init values
163 x_init = self._naked_clone_layer(inputs)
164 data_norm_axes = list(range(x_init.shape.rank - 1))
165 m_init, v_init = tf.nn.moments(x_init, data_norm_axes)
166 scale_init = 1. / tf.math.sqrt(v_init + 1e-10)
167
168 # Assign data dependent init values
169 g_tensor = self.g.assign(self.g * scale_init)
170 if hasattr(self.layer, 'bias'):
171 bias_tensor = self.layer.bias.assign(-m_init * scale_init)
172 return [g_tensor, bias_tensor]
173 else:
174 return [g_tensor]
175
176 def get_config(self):
177 config = {'data_init': self.data_init}
178 base_config = super(WeightNormalization, self).get_config()
179 return dict(list(base_config.items()) + list(config.items()))
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py
--- a/tensorflow_addons/layers/wrappers.py
+++ b/tensorflow_addons/layers/wrappers.py
@@ -62,7 +62,8 @@
def build(self, input_shape):
"""Build `Layer`"""
input_shape = tf.TensorShape(input_shape).as_list()
- self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)
+ self.input_spec = tf.keras.layers.InputSpec(
+ shape=[None] + input_shape[1:])
if not self.layer.built:
self.layer.build(input_shape)
| {"golden_diff": "diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py\n--- a/tensorflow_addons/layers/wrappers.py\n+++ b/tensorflow_addons/layers/wrappers.py\n@@ -62,7 +62,8 @@\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n- self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)\n+ self.input_spec = tf.keras.layers.InputSpec(\n+ shape=[None] + input_shape[1:])\n \n if not self.layer.built:\n self.layer.build(input_shape)\n", "issue": "Time distributed WeightNormalization result in incompatible shape\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 18.04.4 LTS\r\n- TensorFlow version and how it was installed (source or binary): tensorflow-gpu 2.0.0, binary\r\n- TensorFlow-Addons version and how it was installed (source or binary): tensorflow-addons 0.6.0, binary\r\n- Python version: Python 3.6.9\r\n- Is GPU used? (yes/no): yes\r\n\r\n**Describe the bug**\r\nTime distributed WeightNormalization result in incompatible shape error\r\n\r\nError occur in `input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)`\r\n - WeightNormalization ON: `self.input_spec (shape=[32, 64, 64, 3], ndim=4)`\r\n - WeightNormalization OFF: `self.input_spec (ndim=4, axes={-1:3})`\r\n\r\n**Code to reproduce the issue**\r\n\r\n```python\r\nimport tensorflow as tf\r\nfrom tensorflow_addons.layers.wrappers import WeightNormalization\r\n\r\nweight_norm = True\r\n\r\nbatch_shape = (32, 16, 64, 64, 3)\r\ninputs = tf.keras.layers.Input(batch_shape=batch_shape)\r\nif weight_norm is True:\r\n a = tf.keras.layers.Conv2D(3, 5)\r\n b = WeightNormalization(a)\r\n outputs = tf.keras.layers.TimeDistributed(b)(inputs)\r\nelse:\r\n outputs = tf.keras.layers.TimeDistributed(tf.keras.layers.Conv2D(3, 5))(inputs)\r\nmodel = tf.keras.Model(inputs, outputs)\r\n```\r\n\r\n**Other info / logs**\r\n\r\n```\r\nWARNING:tensorflow:From /opt/anaconda/lib/python3.6/site-packages/tensorflow_addons/layers/wrappers.py:84: Layer.add_variable (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\r\nInstructions for updating:\r\nPlease use `layer.add_weight` method instead.\r\nTraceback (most recent call last):\r\n File \"/workarea/projects/magic/MagIC/applications/temporal/app_test.py\", line 11, in <module>\r\n outputs = tf.keras.layers.TimeDistributed(b)(inputs)\r\n File \"/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py\", line 842, in __call__\r\n outputs = call_fn(cast_inputs, *args, **kwargs)\r\n File \"/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/wrappers.py\", line 254, in call\r\n y = self.layer(inputs, **kwargs)\r\n File \"/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py\", line 812, in __call__\r\n self.name)\r\n File \"/opt/anaconda/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/input_spec.py\", line 224, in assert_input_compatibility\r\n ', found shape=' + str(shape))\r\nValueError: Input 0 is incompatible with layer weight_normalization: expected shape=[32, 64, 64, 3], found shape=[512, 64, 64, 3]\r\n```\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass WeightNormalization(tf.keras.layers.Wrapper):\n \"\"\"This wrapper reparameterizes a layer by decoupling the weight's\n magnitude and direction.\n\n This speeds up convergence by improving the\n conditioning of the optimization problem.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. Kingma (2016)\n WeightNormalization wrapper works for keras and tf layers.\n ```python\n net = WeightNormalization(\n tf.keras.layers.Conv2D(2, 2, activation='relu'),\n input_shape=(32, 32, 3),\n data_init=True)(x)\n net = WeightNormalization(\n tf.keras.layers.Conv2D(16, 5, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(120, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(n_classes),\n data_init=True)(net)\n ```\n Arguments:\n layer: a layer instance.\n data_init: If `True` use data dependent variable initialization\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n ValueError: If `Layer` does not contain a `kernel` of weights\n NotImplementedError: If `data_init` is True and running graph execution\n \"\"\"\n\n def __init__(self, layer, data_init=True, **kwargs):\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._track_trackable(layer, name='layer')\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n self.input_spec = tf.keras.layers.InputSpec(shape=input_shape)\n\n if not self.layer.built:\n self.layer.build(input_shape)\n\n if not hasattr(self.layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n\n # The kernel's filter or unit dimension is -1\n self.layer_depth = int(self.layer.kernel.shape[-1])\n self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))\n\n self.g = self.add_weight(\n name='g',\n shape=(self.layer_depth,),\n initializer='ones',\n dtype=self.layer.kernel.dtype,\n trainable=True)\n self.v = self.layer.kernel\n\n self._initialized = self.add_weight(\n name='initialized',\n shape=None,\n initializer='zeros',\n dtype=tf.dtypes.bool,\n trainable=False)\n\n if self.data_init:\n # Used for data initialization in self._data_dep_init.\n layer_config = tf.keras.layers.serialize(self.layer)\n layer_config['config']['trainable'] = False\n self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)\n self._naked_clone_layer.build(input_shape)\n self._naked_clone_layer.set_weights(self.layer.get_weights())\n 
self._naked_clone_layer.activation = None\n\n self.built = True\n\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n\n def _do_nothing():\n return tf.identity(self.g)\n\n def _update_weights():\n # Ensure we read `self.g` after _update_weights.\n with tf.control_dependencies(self._initialize_weights(inputs)):\n return tf.identity(self.g)\n\n g = tf.cond(self._initialized, _do_nothing, _update_weights)\n\n with tf.name_scope('compute_weights'):\n # Replace kernel by normalized weight variable.\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * g\n\n # Ensure we calculate result after updating kernel.\n update_kernel = tf.identity(self.layer.kernel)\n with tf.control_dependencies([update_kernel]):\n outputs = self.layer(inputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(\n self.layer.compute_output_shape(input_shape).as_list())\n\n def _initialize_weights(self, inputs):\n \"\"\"Initialize weight g.\n\n The initial value of g could either from the initial value in v,\n or by the input value if self.data_init is True.\n \"\"\"\n with tf.control_dependencies([\n tf.debugging.assert_equal( # pylint: disable=bad-continuation\n self._initialized,\n False,\n message='The layer has been initialized.')\n ]):\n if self.data_init:\n assign_tensors = self._data_dep_init(inputs)\n else:\n assign_tensors = self._init_norm()\n assign_tensors.append(self._initialized.assign(True))\n return assign_tensors\n\n def _init_norm(self):\n \"\"\"Set the weight g with the norm of the weight vector.\"\"\"\n with tf.name_scope('init_norm'):\n v_flat = tf.reshape(self.v, [-1, self.layer_depth])\n v_norm = tf.linalg.norm(v_flat, axis=0)\n g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))\n return [g_tensor]\n\n def _data_dep_init(self, inputs):\n \"\"\"Data dependent initialization.\"\"\"\n with tf.name_scope('data_dep_init'):\n # Generate data dependent init values\n x_init = self._naked_clone_layer(inputs)\n data_norm_axes = list(range(x_init.shape.rank - 1))\n m_init, v_init = tf.nn.moments(x_init, data_norm_axes)\n scale_init = 1. / tf.math.sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n g_tensor = self.g.assign(self.g * scale_init)\n if hasattr(self.layer, 'bias'):\n bias_tensor = self.layer.bias.assign(-m_init * scale_init)\n return [g_tensor, bias_tensor]\n else:\n return [g_tensor]\n\n def get_config(self):\n config = {'data_init': self.data_init}\n base_config = super(WeightNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/wrappers.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass WeightNormalization(tf.keras.layers.Wrapper):\n \"\"\"This wrapper reparameterizes a layer by decoupling the weight's\n magnitude and direction.\n\n This speeds up convergence by improving the\n conditioning of the optimization problem.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. Kingma (2016)\n WeightNormalization wrapper works for keras and tf layers.\n ```python\n net = WeightNormalization(\n tf.keras.layers.Conv2D(2, 2, activation='relu'),\n input_shape=(32, 32, 3),\n data_init=True)(x)\n net = WeightNormalization(\n tf.keras.layers.Conv2D(16, 5, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(120, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(n_classes),\n data_init=True)(net)\n ```\n Arguments:\n layer: a layer instance.\n data_init: If `True` use data dependent variable initialization\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n ValueError: If `Layer` does not contain a `kernel` of weights\n NotImplementedError: If `data_init` is True and running graph execution\n \"\"\"\n\n def __init__(self, layer, data_init=True, **kwargs):\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._track_trackable(layer, name='layer')\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n self.input_spec = tf.keras.layers.InputSpec(\n shape=[None] + input_shape[1:])\n\n if not self.layer.built:\n self.layer.build(input_shape)\n\n if not hasattr(self.layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n\n # The kernel's filter or unit dimension is -1\n self.layer_depth = int(self.layer.kernel.shape[-1])\n self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))\n\n self.g = self.add_weight(\n name='g',\n shape=(self.layer_depth,),\n initializer='ones',\n dtype=self.layer.kernel.dtype,\n trainable=True)\n self.v = self.layer.kernel\n\n self._initialized = self.add_weight(\n name='initialized',\n shape=None,\n initializer='zeros',\n dtype=tf.dtypes.bool,\n trainable=False)\n\n if self.data_init:\n # Used for data initialization in self._data_dep_init.\n layer_config = tf.keras.layers.serialize(self.layer)\n layer_config['config']['trainable'] = False\n self._naked_clone_layer = tf.keras.layers.deserialize(layer_config)\n self._naked_clone_layer.build(input_shape)\n self._naked_clone_layer.set_weights(self.layer.get_weights())\n 
self._naked_clone_layer.activation = None\n\n self.built = True\n\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n\n def _do_nothing():\n return tf.identity(self.g)\n\n def _update_weights():\n # Ensure we read `self.g` after _update_weights.\n with tf.control_dependencies(self._initialize_weights(inputs)):\n return tf.identity(self.g)\n\n g = tf.cond(self._initialized, _do_nothing, _update_weights)\n\n with tf.name_scope('compute_weights'):\n # Replace kernel by normalized weight variable.\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * g\n\n # Ensure we calculate result after updating kernel.\n update_kernel = tf.identity(self.layer.kernel)\n with tf.control_dependencies([update_kernel]):\n outputs = self.layer(inputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(\n self.layer.compute_output_shape(input_shape).as_list())\n\n def _initialize_weights(self, inputs):\n \"\"\"Initialize weight g.\n\n The initial value of g could either from the initial value in v,\n or by the input value if self.data_init is True.\n \"\"\"\n with tf.control_dependencies([\n tf.debugging.assert_equal( # pylint: disable=bad-continuation\n self._initialized,\n False,\n message='The layer has been initialized.')\n ]):\n if self.data_init:\n assign_tensors = self._data_dep_init(inputs)\n else:\n assign_tensors = self._init_norm()\n assign_tensors.append(self._initialized.assign(True))\n return assign_tensors\n\n def _init_norm(self):\n \"\"\"Set the weight g with the norm of the weight vector.\"\"\"\n with tf.name_scope('init_norm'):\n v_flat = tf.reshape(self.v, [-1, self.layer_depth])\n v_norm = tf.linalg.norm(v_flat, axis=0)\n g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))\n return [g_tensor]\n\n def _data_dep_init(self, inputs):\n \"\"\"Data dependent initialization.\"\"\"\n with tf.name_scope('data_dep_init'):\n # Generate data dependent init values\n x_init = self._naked_clone_layer(inputs)\n data_norm_axes = list(range(x_init.shape.rank - 1))\n m_init, v_init = tf.nn.moments(x_init, data_norm_axes)\n scale_init = 1. / tf.math.sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n g_tensor = self.g.assign(self.g * scale_init)\n if hasattr(self.layer, 'bias'):\n bias_tensor = self.layer.bias.assign(-m_init * scale_init)\n return [g_tensor, bias_tensor]\n else:\n return [g_tensor]\n\n def get_config(self):\n config = {'data_init': self.data_init}\n base_config = super(WeightNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/wrappers.py"}]} | 2,920 | 151 |
gh_patches_debug_31532 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-3889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improving documentation for Set2Set layer
### 📚 Describe the documentation issue
I am new to the `pytorch_geometric` ecosystem and have been exploring it. At first glance at the `Set2Set` layer in the [docs](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.Set2Set), it is not clear what the inputs `x` and `batch` to the forward pass are.
If I am not wrong, `x` represents the node features of the graph and `batch` represents a mapping from the node features to their graph identifiers.
### Suggest a potential alternative/fix
I was wondering whether it will be good to include it to the docs or maybe also add typing.
Potential fix in `nn.glob.set2set.py`:
```
def forward(self, x: torch.Tensor, batch: torch.Tensor):
r"""
Args:
x: The input node features.
batch: A one dimension tensor representing a mapping between nodes and their graphs
"""
```
--- END ISSUE ---
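To make the proposed wording concrete, here is an illustrative usage snippet (not part of the repository; shapes and values are made up for the example):
```python
import torch
from torch_geometric.nn import Set2Set

x = torch.randn(6, 16)                    # node features: 6 nodes, 16 channels each
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # maps each node to its graph (two graphs here)

pool = Set2Set(in_channels=16, processing_steps=3)
out = pool(x, batch)                      # shape [2, 32]: one 2 * in_channels vector per graph
```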
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/nn/glob/set2set.py`
Content:
```
1 import torch
2 from torch_scatter import scatter_add
3 from torch_geometric.utils import softmax
4
5
6 class Set2Set(torch.nn.Module):
7 r"""The global pooling operator based on iterative content-based attention
8 from the `"Order Matters: Sequence to sequence for sets"
9 <https://arxiv.org/abs/1511.06391>`_ paper
10
11 .. math::
12 \mathbf{q}_t &= \mathrm{LSTM}(\mathbf{q}^{*}_{t-1})
13
14 \alpha_{i,t} &= \mathrm{softmax}(\mathbf{x}_i \cdot \mathbf{q}_t)
15
16 \mathbf{r}_t &= \sum_{i=1}^N \alpha_{i,t} \mathbf{x}_i
17
18 \mathbf{q}^{*}_t &= \mathbf{q}_t \, \Vert \, \mathbf{r}_t,
19
20 where :math:`\mathbf{q}^{*}_T` defines the output of the layer with twice
21 the dimensionality as the input.
22
23 Args:
24 in_channels (int): Size of each input sample.
25 processing_steps (int): Number of iterations :math:`T`.
26 num_layers (int, optional): Number of recurrent layers, *.e.g*, setting
27 :obj:`num_layers=2` would mean stacking two LSTMs together to form
28 a stacked LSTM, with the second LSTM taking in outputs of the first
29 LSTM and computing the final results. (default: :obj:`1`)
30 """
31 def __init__(self, in_channels, processing_steps, num_layers=1):
32 super().__init__()
33
34 self.in_channels = in_channels
35 self.out_channels = 2 * in_channels
36 self.processing_steps = processing_steps
37 self.num_layers = num_layers
38
39 self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,
40 num_layers)
41
42 self.reset_parameters()
43
44 def reset_parameters(self):
45 self.lstm.reset_parameters()
46
47 def forward(self, x, batch):
48 """"""
49 batch_size = batch.max().item() + 1
50
51 h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),
52 x.new_zeros((self.num_layers, batch_size, self.in_channels)))
53 q_star = x.new_zeros(batch_size, self.out_channels)
54
55 for _ in range(self.processing_steps):
56 q, h = self.lstm(q_star.unsqueeze(0), h)
57 q = q.view(batch_size, self.in_channels)
58 e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)
59 a = softmax(e, batch, num_nodes=batch_size)
60 r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)
61 q_star = torch.cat([q, r], dim=-1)
62
63 return q_star
64
65 def __repr__(self) -> str:
66 return (f'{self.__class__.__name__}({self.in_channels}, '
67 f'{self.out_channels})')
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/nn/glob/set2set.py b/torch_geometric/nn/glob/set2set.py
--- a/torch_geometric/nn/glob/set2set.py
+++ b/torch_geometric/nn/glob/set2set.py
@@ -1,5 +1,9 @@
+from typing import Optional
+
import torch
+from torch import Tensor
from torch_scatter import scatter_add
+
from torch_geometric.utils import softmax
@@ -27,8 +31,17 @@
:obj:`num_layers=2` would mean stacking two LSTMs together to form
a stacked LSTM, with the second LSTM taking in outputs of the first
LSTM and computing the final results. (default: :obj:`1`)
+
+ Shapes:
+ - **input:**
+ node features :math:`(|\mathcal{V}|, F)`,
+ batch vector :math:`(|\mathcal{V}|)` *(optional)*
+ - **output:**
+ set features :math:`(|\mathcal{G}|, 2 * F)` where
+ :math:`|\mathcal{G}|` denotes the number of graphs in the batch
"""
- def __init__(self, in_channels, processing_steps, num_layers=1):
+ def __init__(self, in_channels: int, processing_steps: int,
+ num_layers: int = 1):
super().__init__()
self.in_channels = in_channels
@@ -44,8 +57,16 @@
def reset_parameters(self):
self.lstm.reset_parameters()
- def forward(self, x, batch):
- """"""
+ def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:
+ r"""
+ Args:
+ x (Tensor): The input node features.
+ batch (LongTensor, optional): A vector that maps each node to its
+ respective graph identifier. (default: :obj:`None`)
+ """
+ if batch is None:
+ batch = x.new_zeros(x.size(0), dtype=torch.int64)
+
batch_size = batch.max().item() + 1
h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),
| {"golden_diff": "diff --git a/torch_geometric/nn/glob/set2set.py b/torch_geometric/nn/glob/set2set.py\n--- a/torch_geometric/nn/glob/set2set.py\n+++ b/torch_geometric/nn/glob/set2set.py\n@@ -1,5 +1,9 @@\n+from typing import Optional\n+\n import torch\n+from torch import Tensor\n from torch_scatter import scatter_add\n+\n from torch_geometric.utils import softmax\n \n \n@@ -27,8 +31,17 @@\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. (default: :obj:`1`)\n+\n+ Shapes:\n+ - **input:**\n+ node features :math:`(|\\mathcal{V}|, F)`,\n+ batch vector :math:`(|\\mathcal{V}|)` *(optional)*\n+ - **output:**\n+ set features :math:`(|\\mathcal{G}|, 2 * F)` where\n+ :math:`|\\mathcal{G}|` denotes the number of graphs in the batch\n \"\"\"\n- def __init__(self, in_channels, processing_steps, num_layers=1):\n+ def __init__(self, in_channels: int, processing_steps: int,\n+ num_layers: int = 1):\n super().__init__()\n \n self.in_channels = in_channels\n@@ -44,8 +57,16 @@\n def reset_parameters(self):\n self.lstm.reset_parameters()\n \n- def forward(self, x, batch):\n- \"\"\"\"\"\"\n+ def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:\n+ r\"\"\"\n+ Args:\n+ x (Tensor): The input node features.\n+ batch (LongTensor, optional): A vector that maps each node to its\n+ respective graph identifier. (default: :obj:`None`)\n+ \"\"\"\n+ if batch is None:\n+ batch = x.new_zeros(x.size(0), dtype=torch.int64)\n+\n batch_size = batch.max().item() + 1\n \n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n", "issue": "Improving documentation for Set2Set layer\n### \ud83d\udcda Describe the documentation issue\n\nI am new to `pytorch_geometric` ecosystem and I was exploring it. At the first glance to the `Set2Set` layer in the [docs](https://pytorch-geometric.readthedocs.io/en/latest/modules/nn.html#torch_geometric.nn.glob.Set2Set), it is not clear what the inputs `x` and `batch` are to the forward pass.\r\n\r\nIf I am not wrong, `x` represents the node features of the graph and `batch` represents a mapping between the node features to their graph identifiers.\r\n\n\n### Suggest a potential alternative/fix\n\nI was wondering whether it will be good to include it to the docs or maybe also add typing.\r\nPotential fix in `nn.glob.set2set.py`:\r\n```\r\ndef forward(self, x: torch.Tensor, batch: torch.Tensor):\r\n r\"\"\"\r\n Args:\r\n x: The input node features.\r\n batch: A one dimension tensor representing a mapping between nodes and their graphs\r\n \"\"\"\r\n```\n", "before_files": [{"content": "import torch\nfrom torch_scatter import scatter_add\nfrom torch_geometric.utils import softmax\n\n\nclass Set2Set(torch.nn.Module):\n r\"\"\"The global pooling operator based on iterative content-based attention\n from the `\"Order Matters: Sequence to sequence for sets\"\n <https://arxiv.org/abs/1511.06391>`_ paper\n\n .. 
math::\n \\mathbf{q}_t &= \\mathrm{LSTM}(\\mathbf{q}^{*}_{t-1})\n\n \\alpha_{i,t} &= \\mathrm{softmax}(\\mathbf{x}_i \\cdot \\mathbf{q}_t)\n\n \\mathbf{r}_t &= \\sum_{i=1}^N \\alpha_{i,t} \\mathbf{x}_i\n\n \\mathbf{q}^{*}_t &= \\mathbf{q}_t \\, \\Vert \\, \\mathbf{r}_t,\n\n where :math:`\\mathbf{q}^{*}_T` defines the output of the layer with twice\n the dimensionality as the input.\n\n Args:\n in_channels (int): Size of each input sample.\n processing_steps (int): Number of iterations :math:`T`.\n num_layers (int, optional): Number of recurrent layers, *.e.g*, setting\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. (default: :obj:`1`)\n \"\"\"\n def __init__(self, in_channels, processing_steps, num_layers=1):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = 2 * in_channels\n self.processing_steps = processing_steps\n self.num_layers = num_layers\n\n self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,\n num_layers)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lstm.reset_parameters()\n\n def forward(self, x, batch):\n \"\"\"\"\"\"\n batch_size = batch.max().item() + 1\n\n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n x.new_zeros((self.num_layers, batch_size, self.in_channels)))\n q_star = x.new_zeros(batch_size, self.out_channels)\n\n for _ in range(self.processing_steps):\n q, h = self.lstm(q_star.unsqueeze(0), h)\n q = q.view(batch_size, self.in_channels)\n e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)\n a = softmax(e, batch, num_nodes=batch_size)\n r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)\n q_star = torch.cat([q, r], dim=-1)\n\n return q_star\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels})')\n", "path": "torch_geometric/nn/glob/set2set.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch_scatter import scatter_add\n\nfrom torch_geometric.utils import softmax\n\n\nclass Set2Set(torch.nn.Module):\n r\"\"\"The global pooling operator based on iterative content-based attention\n from the `\"Order Matters: Sequence to sequence for sets\"\n <https://arxiv.org/abs/1511.06391>`_ paper\n\n .. math::\n \\mathbf{q}_t &= \\mathrm{LSTM}(\\mathbf{q}^{*}_{t-1})\n\n \\alpha_{i,t} &= \\mathrm{softmax}(\\mathbf{x}_i \\cdot \\mathbf{q}_t)\n\n \\mathbf{r}_t &= \\sum_{i=1}^N \\alpha_{i,t} \\mathbf{x}_i\n\n \\mathbf{q}^{*}_t &= \\mathbf{q}_t \\, \\Vert \\, \\mathbf{r}_t,\n\n where :math:`\\mathbf{q}^{*}_T` defines the output of the layer with twice\n the dimensionality as the input.\n\n Args:\n in_channels (int): Size of each input sample.\n processing_steps (int): Number of iterations :math:`T`.\n num_layers (int, optional): Number of recurrent layers, *.e.g*, setting\n :obj:`num_layers=2` would mean stacking two LSTMs together to form\n a stacked LSTM, with the second LSTM taking in outputs of the first\n LSTM and computing the final results. 
(default: :obj:`1`)\n\n Shapes:\n - **input:**\n node features :math:`(|\\mathcal{V}|, F)`,\n batch vector :math:`(|\\mathcal{V}|)` *(optional)*\n - **output:**\n set features :math:`(|\\mathcal{G}|, 2 * F)` where\n :math:`|\\mathcal{G}|` denotes the number of graphs in the batch\n \"\"\"\n def __init__(self, in_channels: int, processing_steps: int,\n num_layers: int = 1):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = 2 * in_channels\n self.processing_steps = processing_steps\n self.num_layers = num_layers\n\n self.lstm = torch.nn.LSTM(self.out_channels, self.in_channels,\n num_layers)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lstm.reset_parameters()\n\n def forward(self, x: Tensor, batch: Optional[Tensor] = None) -> Tensor:\n r\"\"\"\n Args:\n x (Tensor): The input node features.\n batch (LongTensor, optional): A vector that maps each node to its\n respective graph identifier. (default: :obj:`None`)\n \"\"\"\n if batch is None:\n batch = x.new_zeros(x.size(0), dtype=torch.int64)\n\n batch_size = batch.max().item() + 1\n\n h = (x.new_zeros((self.num_layers, batch_size, self.in_channels)),\n x.new_zeros((self.num_layers, batch_size, self.in_channels)))\n q_star = x.new_zeros(batch_size, self.out_channels)\n\n for _ in range(self.processing_steps):\n q, h = self.lstm(q_star.unsqueeze(0), h)\n q = q.view(batch_size, self.in_channels)\n e = (x * q.index_select(0, batch)).sum(dim=-1, keepdim=True)\n a = softmax(e, batch, num_nodes=batch_size)\n r = scatter_add(a * x, batch, dim=0, dim_size=batch_size)\n q_star = torch.cat([q, r], dim=-1)\n\n return q_star\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}({self.in_channels}, '\n f'{self.out_channels})')\n", "path": "torch_geometric/nn/glob/set2set.py"}]} | 1,294 | 507 |
gh_patches_debug_18939 | rasdani/github-patches | git_diff | TileDB-Inc__TileDB-Py-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nightly Azure Wheel Fail on Fri, February 3rd 2023
See run for more details:
https://dev.azure.com/TileDB-Inc/CI/_build/results?buildId=$&view=results
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/config.py`
Content:
```
1 # config.py
2 #
3 # LICENSE
4 #
5 # The MIT License
6 #
7 # Copyright (c) 2020 TileDB, Inc.
8 #
9 # Permission is hereby granted, free of charge, to any person obtaining a copy
10 # of this software and associated documentation files (the "Software"), to deal
11 # in the Software without restriction, including without limitation the rights
12 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
13 # copies of the Software, and to permit persons to whom the Software is
14 # furnished to do so, subject to the following conditions:
15 #
16 # The above copyright notice and this permission notice shall be included in
17 # all copies or substantial portions of the Software.
18 #
19 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
22 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 # THE SOFTWARE.
26 #
27 # DESCRIPTION
28 #
29 # Please see the TileDB documentation for more information:
30 # https://docs.tiledb.com/main/how-to/configuration
31 #
32 # This program shows how to set/get the TileDB configuration parameters.
33 #
34
35 import tiledb
36
37
38 def set_get_config_ctx_vfs():
39 # Create config object
40 config = tiledb.Config()
41
42 # Set/get config to/from ctx
43 ctx = tiledb.Ctx(config)
44 print(ctx.config())
45
46 # Set/get config to/from VFS
47 vfs = tiledb.VFS(config)
48 print(vfs.config())
49
50
51 def set_get_config():
52 config = tiledb.Config()
53
54 # Set value
55 config["vfs.s3.connect_timeout_ms"] = 5000
56
57 # Get value
58 tile_cache_size = config["sm.tile_cache_size"]
59 print("Tile cache size: %s" % str(tile_cache_size))
60
61
62 def print_default():
63 config = tiledb.Config()
64 print("\nDefault settings:")
65 for p in config.items():
66 print('"%s" : "%s"' % (p[0], p[1]))
67
68
69 def iter_config_with_prefix():
70 config = tiledb.Config()
71 # Print only the S3 settings.
72 print("\nVFS S3 settings:")
73 for p in config.items("vfs.s3."):
74 print('"%s" : "%s"' % (p[0], p[1]))
75
76
77 def save_load_config():
78 # Save to file
79 config = tiledb.Config()
80 config["sm.tile_cache_size"] = 0
81 config.save("tiledb_config.txt")
82
83 # Load from file
84 config_load = tiledb.Config.load("tiledb_config.txt")
85 print(
86 "\nTile cache size after loading from file: %s"
87 % str(config_load["sm.tile_cache_size"])
88 )
89
90
91 set_get_config_ctx_vfs()
92 set_get_config()
93 print_default()
94 iter_config_with_prefix()
95 save_load_config()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/config.py b/examples/config.py
--- a/examples/config.py
+++ b/examples/config.py
@@ -55,8 +55,8 @@
config["vfs.s3.connect_timeout_ms"] = 5000
# Get value
- tile_cache_size = config["sm.tile_cache_size"]
- print("Tile cache size: %s" % str(tile_cache_size))
+ tile_cache_size = config["sm.memory_budget"]
+ print("Memory budget: %s" % str(tile_cache_size))
def print_default():
@@ -77,14 +77,14 @@
def save_load_config():
# Save to file
config = tiledb.Config()
- config["sm.tile_cache_size"] = 0
+ config["sm.memory_budget"] = 1234
config.save("tiledb_config.txt")
# Load from file
config_load = tiledb.Config.load("tiledb_config.txt")
print(
"\nTile cache size after loading from file: %s"
- % str(config_load["sm.tile_cache_size"])
+ % str(config_load["sm.memory_budget"])
)
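The patch above swaps the example's config key to `sm.memory_budget`; a quick standalone check of the updated usage (a sketch only — whether a given key is accepted depends on the installed TileDB version):
```python
import tiledb

config = tiledb.Config()
config["sm.memory_budget"] = 1234      # key used by the patched example
print(config["sm.memory_budget"])      # values round-trip as strings, e.g. "1234"
```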
| {"golden_diff": "diff --git a/examples/config.py b/examples/config.py\n--- a/examples/config.py\n+++ b/examples/config.py\n@@ -55,8 +55,8 @@\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n \n # Get value\n- tile_cache_size = config[\"sm.tile_cache_size\"]\n- print(\"Tile cache size: %s\" % str(tile_cache_size))\n+ tile_cache_size = config[\"sm.memory_budget\"]\n+ print(\"Memory budget: %s\" % str(tile_cache_size))\n \n \n def print_default():\n@@ -77,14 +77,14 @@\n def save_load_config():\n # Save to file\n config = tiledb.Config()\n- config[\"sm.tile_cache_size\"] = 0\n+ config[\"sm.memory_budget\"] = 1234\n config.save(\"tiledb_config.txt\")\n \n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n- % str(config_load[\"sm.tile_cache_size\"])\n+ % str(config_load[\"sm.memory_budget\"])\n )\n", "issue": "Nightly Azure Wheel Fail on Fri, February 3rd 2023\nSee run for more details:\nhttps://dev.azure.com/TileDB-Inc/CI/_build/results?buildId=$&view=results\n", "before_files": [{"content": "# config.py\n#\n# LICENSE\n#\n# The MIT License\n#\n# Copyright (c) 2020 TileDB, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# DESCRIPTION\n#\n# Please see the TileDB documentation for more information:\n# https://docs.tiledb.com/main/how-to/configuration\n#\n# This program shows how to set/get the TileDB configuration parameters.\n#\n\nimport tiledb\n\n\ndef set_get_config_ctx_vfs():\n # Create config object\n config = tiledb.Config()\n\n # Set/get config to/from ctx\n ctx = tiledb.Ctx(config)\n print(ctx.config())\n\n # Set/get config to/from VFS\n vfs = tiledb.VFS(config)\n print(vfs.config())\n\n\ndef set_get_config():\n config = tiledb.Config()\n\n # Set value\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n\n # Get value\n tile_cache_size = config[\"sm.tile_cache_size\"]\n print(\"Tile cache size: %s\" % str(tile_cache_size))\n\n\ndef print_default():\n config = tiledb.Config()\n print(\"\\nDefault settings:\")\n for p in config.items():\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef iter_config_with_prefix():\n config = tiledb.Config()\n # Print only the S3 settings.\n print(\"\\nVFS S3 settings:\")\n for p in config.items(\"vfs.s3.\"):\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef save_load_config():\n # Save to file\n config = tiledb.Config()\n config[\"sm.tile_cache_size\"] = 0\n config.save(\"tiledb_config.txt\")\n\n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n % str(config_load[\"sm.tile_cache_size\"])\n )\n\n\nset_get_config_ctx_vfs()\nset_get_config()\nprint_default()\niter_config_with_prefix()\nsave_load_config()\n", "path": "examples/config.py"}], "after_files": [{"content": "# config.py\n#\n# LICENSE\n#\n# The MIT License\n#\n# Copyright (c) 2020 TileDB, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# DESCRIPTION\n#\n# Please see the TileDB documentation for more information:\n# https://docs.tiledb.com/main/how-to/configuration\n#\n# This program shows how to set/get the TileDB configuration parameters.\n#\n\nimport tiledb\n\n\ndef set_get_config_ctx_vfs():\n # Create config object\n config = tiledb.Config()\n\n # Set/get config to/from ctx\n ctx = tiledb.Ctx(config)\n print(ctx.config())\n\n # Set/get config to/from VFS\n vfs = tiledb.VFS(config)\n print(vfs.config())\n\n\ndef set_get_config():\n config = tiledb.Config()\n\n # Set value\n config[\"vfs.s3.connect_timeout_ms\"] = 5000\n\n # Get value\n tile_cache_size = config[\"sm.memory_budget\"]\n print(\"Memory budget: %s\" % str(tile_cache_size))\n\n\ndef print_default():\n config = tiledb.Config()\n print(\"\\nDefault settings:\")\n for p in config.items():\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef iter_config_with_prefix():\n config = tiledb.Config()\n # Print only the S3 settings.\n print(\"\\nVFS S3 settings:\")\n for p in config.items(\"vfs.s3.\"):\n print('\"%s\" : \"%s\"' % (p[0], p[1]))\n\n\ndef save_load_config():\n # Save to file\n config = tiledb.Config()\n config[\"sm.memory_budget\"] = 1234\n config.save(\"tiledb_config.txt\")\n\n # Load from file\n config_load = tiledb.Config.load(\"tiledb_config.txt\")\n print(\n \"\\nTile cache size after loading from file: %s\"\n % str(config_load[\"sm.memory_budget\"])\n )\n\n\nset_get_config_ctx_vfs()\nset_get_config()\nprint_default()\niter_config_with_prefix()\nsave_load_config()\n", "path": "examples/config.py"}]} | 1,162 | 253 |
gh_patches_debug_37263 | rasdani/github-patches | git_diff | fidals__shopelectro-725 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make SiteDriver a context manager
It will provide a convenient API to manage selenium resources and properly shut down the selenium driver.
--- END ISSUE ---
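One possible shape of the change (an illustrative sketch only — the actual patch may structure the driver and its call sites differently):
```python
from seleniumrequests import Remote


class SiteDriver(Remote):
    """Provide convenient access to the site."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the remote selenium session, even if the block raised.
        self.quit()


# Hypothetical call site:
# with SiteDriver(site_url=settings.BASE_URL) as driver:
#     selenium.CategoryPage(driver, slug).load()
```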
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/tasks.py`
Content:
```
1 from contextlib import contextmanager
2
3 from django.conf import settings
4 from django.core.management import call_command
5 from selenium.common.exceptions import WebDriverException
6
7 from shopelectro import selenium
8 from shopelectro.celery import app
9 from shopelectro.report import TelegramReport
10 from shopelectro.models import CategoryPage
11 from shopelectro.management.commands._update_catalog import utils
12
13
14 @contextmanager
15 def report():
16 try:
17 yield
18 except Exception as error:
19 utils.report(str(error))
20 raise error
21
22
23 @app.task
24 def generate_price_files():
25 with report():
26 call_command('price')
27 print('Generate prices complete.')
28
29
30 @app.task
31 def generate_excel_file():
32 with report():
33 call_command('excel')
34 print('Generate excel complete.')
35
36
37 @app.task
38 def collect_static():
39 with report():
40 call_command('collectstatic', '--noinput')
41
42
43 @app.task
44 def update_catalog_command():
45 with report():
46 call_command('update_catalog')
47
48
49 @app.task
50 def update_default_templates():
51 with report():
52 call_command('update_default_templates')
53
54
55 @app.task(autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)
56 def update_catalog():
57 # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap
58 return [
59 update_catalog_command(),
60 update_default_templates(),
61 generate_price_files(),
62 generate_excel_file(),
63 collect_static()
64 ]
65
66 # @todo #690:30m Schedule check_purchase in the celery beat.
67
68
69 @app.task(
70 bind=True,
71 autoretry_for=(WebDriverException, AssertionError),
72 retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},
73 )
74 def check_purchase(self):
75 try:
76 driver = selenium.SiteDriver(site_url=settings.BASE_URL)
77 category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
78 category_page.load()
79 category_page.add_to_cart()
80
81 order_page = selenium.OrderPage(driver)
82 order_page.load()
83 order_page.fill_contacts()
84 order_page.make_order()
85
86 success_page = selenium.SuccessPage(driver)
87 assert success_page.is_success()
88 except (WebDriverException, AssertionError) as err:
89 if self.request.retries + 1 > self.max_retries:
90 # report on the last attempt
91 TelegramReport().send(f'Can\'t buy a product. Got the error: {err}')
92 raise err
93
```
Path: `shopelectro/selenium/driver.py`
Content:
```
1 from urllib.parse import urljoin
2
3 from django.conf import settings
4 from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
5 from selenium.webdriver.support.ui import WebDriverWait
6 from seleniumrequests import Remote
7
8
9 class SiteDriver(Remote):
10 """Provide convenient access to the site."""
11
12 def __init__(self, *, site_url, **kwargs):
13 kwargs.setdefault('command_executor', settings.SELENIUM_URL)
14 kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)
15 super().__init__(**kwargs)
16
17 self.site_url = site_url
18 self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)
19
20 def get(self, url):
21 super().get(urljoin(self.site_url, url))
22
```
Path: `shopelectro/celery.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2 from datetime import timedelta
3 import os
4
5 from celery import Celery
6 from kombu import Exchange, Queue
7
8 # set the default Django settings module for the 'celery' program.
9 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')
10
11 app = Celery('shopelectro')
12
13 # Exchanges
14 default_exchange = Exchange('default', type='direct')
15 utils_exchange = Exchange('utils', type='direct')
16
17 # http://docs.celeryproject.org/en/latest/userguide/tasks.html
18 task_queues = (
19 Queue(
20 name='default',
21 exchange=default_exchange,
22 routing_key='default',
23 ),
24 Queue(
25 name='mail',
26 exchange=utils_exchange,
27 routing_key='utils.mail',
28 ),
29 Queue(
30 name='command',
31 exchange=utils_exchange,
32 routing_key='utils.command',
33 )
34 )
35
36 # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html
37 beat_schedule = {
38 'update-catalog': {
39 'task': 'shopelectro.tasks.update_catalog',
40 'schedule': timedelta(hours=2),
41 },
42 }
43
44 # http://docs.celeryproject.org/en/master/userguide/routing.html
45 task_routes = {
46 'shopelectro.tasks.update_catalog': {
47 'queue': 'command',
48 'routing_key': 'utils.command',
49 'priority': 30,
50 },
51 'ecommerce.tasks.send_mail': {
52 'queue': 'mail',
53 'routing_key': 'utils.mail',
54 'priority': 50,
55 },
56 }
57
58 # Using a string here means the worker don't have to serialize
59 # the configuration object to child processes.
60 # - namespace='CELERY' means all celery-related configuration keys
61 # should have a `CELERY_` prefix.
62 app.config_from_object('django.conf:settings', namespace='CELERY')
63
64 # http://docs.celeryproject.org/en/latest/userguide/configuration.html
65
66 BROCKER_URL = (
67 f'amqp://{os.environ["RABBITMQ_DEFAULT_USER"]}:{os.environ["RABBITMQ_DEFAULT_PASS"]}'
68 f'@{os.environ["RABBITMQ_URL"]}:{os.environ["RABBITMQ_PORT"]}/'
69 )
70 app.conf.update(
71 broker_url=BROCKER_URL,
72 broker_heartbeat=30,
73 task_acks_late=True,
74 task_default_queue='default',
75 task_default_exchange='default',
76 task_default_routing_key='default',
77 task_ignore_result=True,
78 worker_pool_restarts=1000,
79 task_routes=task_routes,
80 task_queues=task_queues,
81 beat_schedule=beat_schedule,
82 )
83
84 # Load task modules from all registered Django app configs.
85 app.autodiscover_tasks()
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/celery.py b/shopelectro/celery.py
--- a/shopelectro/celery.py
+++ b/shopelectro/celery.py
@@ -37,7 +37,11 @@
beat_schedule = {
'update-catalog': {
'task': 'shopelectro.tasks.update_catalog',
- 'schedule': timedelta(hours=2),
+ 'schedule': timedelta(hours=2).total_seconds(),
+ },
+ 'check-purchase': {
+ 'task': 'shopelectro.tasks.check_purchase',
+ 'schedule': timedelta(days=1).total_seconds(),
},
}
@@ -48,6 +52,11 @@
'routing_key': 'utils.command',
'priority': 30,
},
+ 'shopelectro.tasks.check_purchase': {
+ 'queue': 'command',
+ 'routing_key': 'utils.command',
+ 'priority': 20,
+ },
'ecommerce.tasks.send_mail': {
'queue': 'mail',
'routing_key': 'utils.mail',
diff --git a/shopelectro/selenium/driver.py b/shopelectro/selenium/driver.py
--- a/shopelectro/selenium/driver.py
+++ b/shopelectro/selenium/driver.py
@@ -17,5 +17,11 @@
self.site_url = site_url
self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self.quit()
+
def get(self, url):
super().get(urljoin(self.site_url, url))
diff --git a/shopelectro/tasks.py b/shopelectro/tasks.py
--- a/shopelectro/tasks.py
+++ b/shopelectro/tasks.py
@@ -63,8 +63,6 @@
collect_static()
]
-# @todo #690:30m Schedule check_purchase in the celery beat.
-
@app.task(
bind=True,
@@ -73,18 +71,18 @@
)
def check_purchase(self):
try:
- driver = selenium.SiteDriver(site_url=settings.BASE_URL)
- category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
- category_page.load()
- category_page.add_to_cart()
-
- order_page = selenium.OrderPage(driver)
- order_page.load()
- order_page.fill_contacts()
- order_page.make_order()
-
- success_page = selenium.SuccessPage(driver)
- assert success_page.is_success()
+ with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:
+ category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)
+ category_page.load()
+ category_page.add_to_cart()
+
+ order_page = selenium.OrderPage(driver)
+ order_page.load()
+ order_page.fill_contacts()
+ order_page.make_order()
+
+ success_page = selenium.SuccessPage(driver)
+ assert success_page.is_success()
except (WebDriverException, AssertionError) as err:
if self.request.retries + 1 > self.max_retries:
# report on the last attempt
| {"golden_diff": "diff --git a/shopelectro/celery.py b/shopelectro/celery.py\n--- a/shopelectro/celery.py\n+++ b/shopelectro/celery.py\n@@ -37,7 +37,11 @@\n beat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n- 'schedule': timedelta(hours=2),\n+ 'schedule': timedelta(hours=2).total_seconds(),\n+ },\n+ 'check-purchase': {\n+ 'task': 'shopelectro.tasks.check_purchase',\n+ 'schedule': timedelta(days=1).total_seconds(),\n },\n }\n \n@@ -48,6 +52,11 @@\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n+ 'shopelectro.tasks.check_purchase': {\n+ 'queue': 'command',\n+ 'routing_key': 'utils.command',\n+ 'priority': 20,\n+ },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\ndiff --git a/shopelectro/selenium/driver.py b/shopelectro/selenium/driver.py\n--- a/shopelectro/selenium/driver.py\n+++ b/shopelectro/selenium/driver.py\n@@ -17,5 +17,11 @@\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n \n+ def __enter__(self):\n+ return self\n+\n+ def __exit__(self, *args, **kwargs):\n+ self.quit()\n+\n def get(self, url):\n super().get(urljoin(self.site_url, url))\ndiff --git a/shopelectro/tasks.py b/shopelectro/tasks.py\n--- a/shopelectro/tasks.py\n+++ b/shopelectro/tasks.py\n@@ -63,8 +63,6 @@\n collect_static()\n ]\n \n-# @todo #690:30m Schedule check_purchase in the celery beat.\n-\n \n @app.task(\n bind=True,\n@@ -73,18 +71,18 @@\n )\n def check_purchase(self):\n try:\n- driver = selenium.SiteDriver(site_url=settings.BASE_URL)\n- category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n- category_page.load()\n- category_page.add_to_cart()\n-\n- order_page = selenium.OrderPage(driver)\n- order_page.load()\n- order_page.fill_contacts()\n- order_page.make_order()\n-\n- success_page = selenium.SuccessPage(driver)\n- assert success_page.is_success()\n+ with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n+ category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n+ category_page.load()\n+ category_page.add_to_cart()\n+\n+ order_page = selenium.OrderPage(driver)\n+ order_page.load()\n+ order_page.fill_contacts()\n+ order_page.make_order()\n+\n+ success_page = selenium.SuccessPage(driver)\n+ assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n", "issue": "Make SiteDriver a context manager\nIt will provide convenient api to manage selenium resources and properly shout down selenium driver. 
\n", "before_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import TelegramReport\nfrom shopelectro.models import CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n generate_price_files(),\n generate_excel_file(),\n collect_static()\n ]\n\n# @todo #690:30m Schedule check_purchase in the celery beat.\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n driver = selenium.SiteDriver(site_url=settings.BASE_URL)\n category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. 
Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from urllib.parse import urljoin\n\nfrom django.conf import settings\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom seleniumrequests import Remote\n\n\nclass SiteDriver(Remote):\n \"\"\"Provide convenient access to the site.\"\"\"\n\n def __init__(self, *, site_url, **kwargs):\n kwargs.setdefault('command_executor', settings.SELENIUM_URL)\n kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)\n super().__init__(**kwargs)\n\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n\n def get(self, url):\n super().get(urljoin(self.site_url, url))\n", "path": "shopelectro/selenium/driver.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n task_ignore_result=True,\n worker_pool_restarts=1000,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}], "after_files": [{"content": "from contextlib import contextmanager\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom selenium.common.exceptions import WebDriverException\n\nfrom shopelectro import selenium\nfrom shopelectro.celery import app\nfrom shopelectro.report import TelegramReport\nfrom shopelectro.models import 
CategoryPage\nfrom shopelectro.management.commands._update_catalog import utils\n\n\n@contextmanager\ndef report():\n try:\n yield\n except Exception as error:\n utils.report(str(error))\n raise error\n\n\[email protected]\ndef generate_price_files():\n with report():\n call_command('price')\n print('Generate prices complete.')\n\n\[email protected]\ndef generate_excel_file():\n with report():\n call_command('excel')\n print('Generate excel complete.')\n\n\[email protected]\ndef collect_static():\n with report():\n call_command('collectstatic', '--noinput')\n\n\[email protected]\ndef update_catalog_command():\n with report():\n call_command('update_catalog')\n\n\[email protected]\ndef update_default_templates():\n with report():\n call_command('update_default_templates')\n\n\[email protected](autoretry_for=(Exception,), max_retries=3, default_retry_delay=60*10) # Ignore PycodestyleBear (E226)\ndef update_catalog():\n # http://docs.celeryproject.org/en/latest/userguide/canvas.html#map-starmap\n return [\n update_catalog_command(),\n update_default_templates(),\n generate_price_files(),\n generate_excel_file(),\n collect_static()\n ]\n\n\[email protected](\n bind=True,\n autoretry_for=(WebDriverException, AssertionError),\n retry_kwargs={'max_retries': settings.CHECK_PURCHASE_RETRIES},\n)\ndef check_purchase(self):\n try:\n with selenium.SiteDriver(site_url=settings.BASE_URL) as driver:\n category_page = selenium.CategoryPage(driver, CategoryPage.objects.first().slug)\n category_page.load()\n category_page.add_to_cart()\n\n order_page = selenium.OrderPage(driver)\n order_page.load()\n order_page.fill_contacts()\n order_page.make_order()\n\n success_page = selenium.SuccessPage(driver)\n assert success_page.is_success()\n except (WebDriverException, AssertionError) as err:\n if self.request.retries + 1 > self.max_retries:\n # report on the last attempt\n TelegramReport().send(f'Can\\'t buy a product. 
Got the error: {err}')\n raise err\n", "path": "shopelectro/tasks.py"}, {"content": "from urllib.parse import urljoin\n\nfrom django.conf import settings\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom seleniumrequests import Remote\n\n\nclass SiteDriver(Remote):\n \"\"\"Provide convenient access to the site.\"\"\"\n\n def __init__(self, *, site_url, **kwargs):\n kwargs.setdefault('command_executor', settings.SELENIUM_URL)\n kwargs.setdefault('desired_capabilities', DesiredCapabilities.CHROME)\n super().__init__(**kwargs)\n\n self.site_url = site_url\n self.wait = WebDriverWait(self, settings.SELENIUM_WAIT_SECONDS)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args, **kwargs):\n self.quit()\n\n def get(self, url):\n super().get(urljoin(self.site_url, url))\n", "path": "shopelectro/selenium/driver.py"}, {"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n task_ignore_result=True,\n worker_pool_restarts=1000,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}]} | 1,991 | 715 |
gh_patches_debug_11568 | rasdani/github-patches | git_diff | pandas-dev__pandas-27300 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: maybe_convert_objects seen NaT speed-up
I believe after `seen.object_ = 1` here:
https://github.com/pandas-dev/pandas/blob/c64c9cb44222a42f7b02d4d6007919cd0645f1be/pandas/_libs/lib.pyx#L1956-L1958
should go `break` as well.
Test:
```python
size = 10**7
arr = list(range(size))
arr[0] = pd.NaT
arr = np.array(arr)
```
```python
%timeit lib.maybe_convert_objects(arr, convert_datetime=0, convert_timedelta=0)
```
As now output is:
```python
1.84 s ± 14.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
With `break`:
```python
57.1 µs ± 887 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
```
And both cases produce same array:
```python
array([NaT, 1, 2, ..., 9999997, 9999998, 9999999], dtype=object)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `asv_bench/benchmarks/algorithms.py`
Content:
```
1 from importlib import import_module
2
3 import numpy as np
4
5 import pandas as pd
6 from pandas.util import testing as tm
7
8 for imp in ["pandas.util", "pandas.tools.hashing"]:
9 try:
10 hashing = import_module(imp)
11 break
12 except (ImportError, TypeError, ValueError):
13 pass
14
15
16 class Factorize:
17
18 params = [[True, False], ["int", "uint", "float", "string"]]
19 param_names = ["sort", "dtype"]
20
21 def setup(self, sort, dtype):
22 N = 10 ** 5
23 data = {
24 "int": pd.Int64Index(np.arange(N).repeat(5)),
25 "uint": pd.UInt64Index(np.arange(N).repeat(5)),
26 "float": pd.Float64Index(np.random.randn(N).repeat(5)),
27 "string": tm.makeStringIndex(N).repeat(5),
28 }
29 self.idx = data[dtype]
30
31 def time_factorize(self, sort, dtype):
32 self.idx.factorize(sort=sort)
33
34
35 class FactorizeUnique:
36
37 params = [[True, False], ["int", "uint", "float", "string"]]
38 param_names = ["sort", "dtype"]
39
40 def setup(self, sort, dtype):
41 N = 10 ** 5
42 data = {
43 "int": pd.Int64Index(np.arange(N)),
44 "uint": pd.UInt64Index(np.arange(N)),
45 "float": pd.Float64Index(np.arange(N)),
46 "string": tm.makeStringIndex(N),
47 }
48 self.idx = data[dtype]
49 assert self.idx.is_unique
50
51 def time_factorize(self, sort, dtype):
52 self.idx.factorize(sort=sort)
53
54
55 class Duplicated:
56
57 params = [["first", "last", False], ["int", "uint", "float", "string"]]
58 param_names = ["keep", "dtype"]
59
60 def setup(self, keep, dtype):
61 N = 10 ** 5
62 data = {
63 "int": pd.Int64Index(np.arange(N).repeat(5)),
64 "uint": pd.UInt64Index(np.arange(N).repeat(5)),
65 "float": pd.Float64Index(np.random.randn(N).repeat(5)),
66 "string": tm.makeStringIndex(N).repeat(5),
67 }
68 self.idx = data[dtype]
69 # cache is_unique
70 self.idx.is_unique
71
72 def time_duplicated(self, keep, dtype):
73 self.idx.duplicated(keep=keep)
74
75
76 class DuplicatedUniqueIndex:
77
78 params = ["int", "uint", "float", "string"]
79 param_names = ["dtype"]
80
81 def setup(self, dtype):
82 N = 10 ** 5
83 data = {
84 "int": pd.Int64Index(np.arange(N)),
85 "uint": pd.UInt64Index(np.arange(N)),
86 "float": pd.Float64Index(np.random.randn(N)),
87 "string": tm.makeStringIndex(N),
88 }
89 self.idx = data[dtype]
90 # cache is_unique
91 self.idx.is_unique
92
93 def time_duplicated_unique(self, dtype):
94 self.idx.duplicated()
95
96
97 class Hashing:
98 def setup_cache(self):
99 N = 10 ** 5
100
101 df = pd.DataFrame(
102 {
103 "strings": pd.Series(
104 tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))
105 ),
106 "floats": np.random.randn(N),
107 "ints": np.arange(N),
108 "dates": pd.date_range("20110101", freq="s", periods=N),
109 "timedeltas": pd.timedelta_range("1 day", freq="s", periods=N),
110 }
111 )
112 df["categories"] = df["strings"].astype("category")
113 df.iloc[10:20] = np.nan
114 return df
115
116 def time_frame(self, df):
117 hashing.hash_pandas_object(df)
118
119 def time_series_int(self, df):
120 hashing.hash_pandas_object(df["ints"])
121
122 def time_series_string(self, df):
123 hashing.hash_pandas_object(df["strings"])
124
125 def time_series_float(self, df):
126 hashing.hash_pandas_object(df["floats"])
127
128 def time_series_categorical(self, df):
129 hashing.hash_pandas_object(df["categories"])
130
131 def time_series_timedeltas(self, df):
132 hashing.hash_pandas_object(df["timedeltas"])
133
134 def time_series_dates(self, df):
135 hashing.hash_pandas_object(df["dates"])
136
137
138 class Quantile:
139 params = [
140 [0, 0.5, 1],
141 ["linear", "nearest", "lower", "higher", "midpoint"],
142 ["float", "int", "uint"],
143 ]
144 param_names = ["quantile", "interpolation", "dtype"]
145
146 def setup(self, quantile, interpolation, dtype):
147 N = 10 ** 5
148 data = {
149 "int": np.arange(N),
150 "uint": np.arange(N).astype(np.uint64),
151 "float": np.random.randn(N),
152 }
153 self.idx = pd.Series(data[dtype].repeat(5))
154
155 def time_quantile(self, quantile, interpolation, dtype):
156 self.idx.quantile(quantile, interpolation=interpolation)
157
158
159 class SortIntegerArray:
160 params = [10 ** 3, 10 ** 5]
161
162 def setup(self, N):
163 data = np.arange(N, dtype=float)
164 data[40] = np.nan
165 self.array = pd.array(data, dtype="Int64")
166
167 def time_argsort(self, N):
168 self.array.argsort()
169
170
171 from .pandas_vb_common import setup # noqa: F401 isort:skip
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py
--- a/asv_bench/benchmarks/algorithms.py
+++ b/asv_bench/benchmarks/algorithms.py
@@ -2,6 +2,8 @@
import numpy as np
+from pandas._libs import lib
+
import pandas as pd
from pandas.util import testing as tm
@@ -13,6 +15,19 @@
pass
+class MaybeConvertObjects:
+ def setup(self):
+ N = 10 ** 5
+
+ data = list(range(N))
+ data[0] = pd.NaT
+ data = np.array(data)
+ self.data = data
+
+ def time_maybe_convert_objects(self):
+ lib.maybe_convert_objects(self.data)
+
+
class Factorize:
params = [[True, False], ["int", "uint", "float", "string"]]
| {"golden_diff": "diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py\n--- a/asv_bench/benchmarks/algorithms.py\n+++ b/asv_bench/benchmarks/algorithms.py\n@@ -2,6 +2,8 @@\n \n import numpy as np\n \n+from pandas._libs import lib\n+\n import pandas as pd\n from pandas.util import testing as tm\n \n@@ -13,6 +15,19 @@\n pass\n \n \n+class MaybeConvertObjects:\n+ def setup(self):\n+ N = 10 ** 5\n+\n+ data = list(range(N))\n+ data[0] = pd.NaT\n+ data = np.array(data)\n+ self.data = data\n+\n+ def time_maybe_convert_objects(self):\n+ lib.maybe_convert_objects(self.data)\n+\n+\n class Factorize:\n \n params = [[True, False], [\"int\", \"uint\", \"float\", \"string\"]]\n", "issue": "ENH: maybe_convert_objects seen NaT speed-up\nI believe after `seen.object_ = 1` here:\r\nhttps://github.com/pandas-dev/pandas/blob/c64c9cb44222a42f7b02d4d6007919cd0645f1be/pandas/_libs/lib.pyx#L1956-L1958\r\nshould go `break` as well.\r\n\r\nTest:\r\n```python\r\nsize = 10**7\r\narr = list(range(size))\r\narr[0] = pd.NaT\r\narr = np.array(arr)\r\n```\r\n```python\r\n%timeit lib.maybe_convert_objects(arr, convert_datetime=0, convert_timedelta=0)\r\n```\r\nAs now output is:\r\n```python\r\n1.84 s \u00b1 14.3 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\n```\r\nWith `break`:\r\n```python\r\n57.1 \u00b5s \u00b1 887 ns per loop (mean \u00b1 std. dev. of 7 runs, 10000 loops each)\r\n```\r\nAnd both cases produce same array:\r\n```python\r\narray([NaT, 1, 2, ..., 9999997, 9999998, 9999999], dtype=object)\r\n```\n", "before_files": [{"content": "from importlib import import_module\n\nimport numpy as np\n\nimport pandas as pd\nfrom pandas.util import testing as tm\n\nfor imp in [\"pandas.util\", \"pandas.tools.hashing\"]:\n try:\n hashing = import_module(imp)\n break\n except (ImportError, TypeError, ValueError):\n pass\n\n\nclass Factorize:\n\n params = [[True, False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"sort\", \"dtype\"]\n\n def setup(self, sort, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N).repeat(5)),\n \"uint\": pd.UInt64Index(np.arange(N).repeat(5)),\n \"float\": pd.Float64Index(np.random.randn(N).repeat(5)),\n \"string\": tm.makeStringIndex(N).repeat(5),\n }\n self.idx = data[dtype]\n\n def time_factorize(self, sort, dtype):\n self.idx.factorize(sort=sort)\n\n\nclass FactorizeUnique:\n\n params = [[True, False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"sort\", \"dtype\"]\n\n def setup(self, sort, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.arange(N)),\n \"string\": tm.makeStringIndex(N),\n }\n self.idx = data[dtype]\n assert self.idx.is_unique\n\n def time_factorize(self, sort, dtype):\n self.idx.factorize(sort=sort)\n\n\nclass Duplicated:\n\n params = [[\"first\", \"last\", False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"keep\", \"dtype\"]\n\n def setup(self, keep, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N).repeat(5)),\n \"uint\": pd.UInt64Index(np.arange(N).repeat(5)),\n \"float\": pd.Float64Index(np.random.randn(N).repeat(5)),\n \"string\": tm.makeStringIndex(N).repeat(5),\n }\n self.idx = data[dtype]\n # cache is_unique\n self.idx.is_unique\n\n def time_duplicated(self, keep, dtype):\n self.idx.duplicated(keep=keep)\n\n\nclass DuplicatedUniqueIndex:\n\n params = [\"int\", \"uint\", \"float\", \"string\"]\n param_names = [\"dtype\"]\n\n def 
setup(self, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.random.randn(N)),\n \"string\": tm.makeStringIndex(N),\n }\n self.idx = data[dtype]\n # cache is_unique\n self.idx.is_unique\n\n def time_duplicated_unique(self, dtype):\n self.idx.duplicated()\n\n\nclass Hashing:\n def setup_cache(self):\n N = 10 ** 5\n\n df = pd.DataFrame(\n {\n \"strings\": pd.Series(\n tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))\n ),\n \"floats\": np.random.randn(N),\n \"ints\": np.arange(N),\n \"dates\": pd.date_range(\"20110101\", freq=\"s\", periods=N),\n \"timedeltas\": pd.timedelta_range(\"1 day\", freq=\"s\", periods=N),\n }\n )\n df[\"categories\"] = df[\"strings\"].astype(\"category\")\n df.iloc[10:20] = np.nan\n return df\n\n def time_frame(self, df):\n hashing.hash_pandas_object(df)\n\n def time_series_int(self, df):\n hashing.hash_pandas_object(df[\"ints\"])\n\n def time_series_string(self, df):\n hashing.hash_pandas_object(df[\"strings\"])\n\n def time_series_float(self, df):\n hashing.hash_pandas_object(df[\"floats\"])\n\n def time_series_categorical(self, df):\n hashing.hash_pandas_object(df[\"categories\"])\n\n def time_series_timedeltas(self, df):\n hashing.hash_pandas_object(df[\"timedeltas\"])\n\n def time_series_dates(self, df):\n hashing.hash_pandas_object(df[\"dates\"])\n\n\nclass Quantile:\n params = [\n [0, 0.5, 1],\n [\"linear\", \"nearest\", \"lower\", \"higher\", \"midpoint\"],\n [\"float\", \"int\", \"uint\"],\n ]\n param_names = [\"quantile\", \"interpolation\", \"dtype\"]\n\n def setup(self, quantile, interpolation, dtype):\n N = 10 ** 5\n data = {\n \"int\": np.arange(N),\n \"uint\": np.arange(N).astype(np.uint64),\n \"float\": np.random.randn(N),\n }\n self.idx = pd.Series(data[dtype].repeat(5))\n\n def time_quantile(self, quantile, interpolation, dtype):\n self.idx.quantile(quantile, interpolation=interpolation)\n\n\nclass SortIntegerArray:\n params = [10 ** 3, 10 ** 5]\n\n def setup(self, N):\n data = np.arange(N, dtype=float)\n data[40] = np.nan\n self.array = pd.array(data, dtype=\"Int64\")\n\n def time_argsort(self, N):\n self.array.argsort()\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/algorithms.py"}], "after_files": [{"content": "from importlib import import_module\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nimport pandas as pd\nfrom pandas.util import testing as tm\n\nfor imp in [\"pandas.util\", \"pandas.tools.hashing\"]:\n try:\n hashing = import_module(imp)\n break\n except (ImportError, TypeError, ValueError):\n pass\n\n\nclass MaybeConvertObjects:\n def setup(self):\n N = 10 ** 5\n\n data = list(range(N))\n data[0] = pd.NaT\n data = np.array(data)\n self.data = data\n\n def time_maybe_convert_objects(self):\n lib.maybe_convert_objects(self.data)\n\n\nclass Factorize:\n\n params = [[True, False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"sort\", \"dtype\"]\n\n def setup(self, sort, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N).repeat(5)),\n \"uint\": pd.UInt64Index(np.arange(N).repeat(5)),\n \"float\": pd.Float64Index(np.random.randn(N).repeat(5)),\n \"string\": tm.makeStringIndex(N).repeat(5),\n }\n self.idx = data[dtype]\n\n def time_factorize(self, sort, dtype):\n self.idx.factorize(sort=sort)\n\n\nclass FactorizeUnique:\n\n params = [[True, False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"sort\", 
\"dtype\"]\n\n def setup(self, sort, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.arange(N)),\n \"string\": tm.makeStringIndex(N),\n }\n self.idx = data[dtype]\n assert self.idx.is_unique\n\n def time_factorize(self, sort, dtype):\n self.idx.factorize(sort=sort)\n\n\nclass Duplicated:\n\n params = [[\"first\", \"last\", False], [\"int\", \"uint\", \"float\", \"string\"]]\n param_names = [\"keep\", \"dtype\"]\n\n def setup(self, keep, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N).repeat(5)),\n \"uint\": pd.UInt64Index(np.arange(N).repeat(5)),\n \"float\": pd.Float64Index(np.random.randn(N).repeat(5)),\n \"string\": tm.makeStringIndex(N).repeat(5),\n }\n self.idx = data[dtype]\n # cache is_unique\n self.idx.is_unique\n\n def time_duplicated(self, keep, dtype):\n self.idx.duplicated(keep=keep)\n\n\nclass DuplicatedUniqueIndex:\n\n params = [\"int\", \"uint\", \"float\", \"string\"]\n param_names = [\"dtype\"]\n\n def setup(self, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.random.randn(N)),\n \"string\": tm.makeStringIndex(N),\n }\n self.idx = data[dtype]\n # cache is_unique\n self.idx.is_unique\n\n def time_duplicated_unique(self, dtype):\n self.idx.duplicated()\n\n\nclass Hashing:\n def setup_cache(self):\n N = 10 ** 5\n\n df = pd.DataFrame(\n {\n \"strings\": pd.Series(\n tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))\n ),\n \"floats\": np.random.randn(N),\n \"ints\": np.arange(N),\n \"dates\": pd.date_range(\"20110101\", freq=\"s\", periods=N),\n \"timedeltas\": pd.timedelta_range(\"1 day\", freq=\"s\", periods=N),\n }\n )\n df[\"categories\"] = df[\"strings\"].astype(\"category\")\n df.iloc[10:20] = np.nan\n return df\n\n def time_frame(self, df):\n hashing.hash_pandas_object(df)\n\n def time_series_int(self, df):\n hashing.hash_pandas_object(df[\"ints\"])\n\n def time_series_string(self, df):\n hashing.hash_pandas_object(df[\"strings\"])\n\n def time_series_float(self, df):\n hashing.hash_pandas_object(df[\"floats\"])\n\n def time_series_categorical(self, df):\n hashing.hash_pandas_object(df[\"categories\"])\n\n def time_series_timedeltas(self, df):\n hashing.hash_pandas_object(df[\"timedeltas\"])\n\n def time_series_dates(self, df):\n hashing.hash_pandas_object(df[\"dates\"])\n\n\nclass Quantile:\n params = [\n [0, 0.5, 1],\n [\"linear\", \"nearest\", \"lower\", \"higher\", \"midpoint\"],\n [\"float\", \"int\", \"uint\"],\n ]\n param_names = [\"quantile\", \"interpolation\", \"dtype\"]\n\n def setup(self, quantile, interpolation, dtype):\n N = 10 ** 5\n data = {\n \"int\": np.arange(N),\n \"uint\": np.arange(N).astype(np.uint64),\n \"float\": np.random.randn(N),\n }\n self.idx = pd.Series(data[dtype].repeat(5))\n\n def time_quantile(self, quantile, interpolation, dtype):\n self.idx.quantile(quantile, interpolation=interpolation)\n\n\nclass SortIntegerArray:\n params = [10 ** 3, 10 ** 5]\n\n def setup(self, N):\n data = np.arange(N, dtype=float)\n data[40] = np.nan\n self.array = pd.array(data, dtype=\"Int64\")\n\n def time_argsort(self, N):\n self.array.argsort()\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/algorithms.py"}]} | 2,251 | 214 |
gh_patches_debug_33345 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] [CV] confusion matrix mixed index bug
**Describe the bug**
Confusion matrix throws an error when there are mixed indexes (mixed labels?) (needs further investigation)
**To Reproduce**
Run the confusion matrix check with the following notebook:
https://www.kaggle.com/code/itay94/using-deepchecks-to-validate-ssdlite-model?scriptVersionId=92088847
**Expected behavior**
Cast the str to int or don't support str
**Screenshots**

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/vision/checks/performance/confusion_matrix.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing confusion matrix report check."""
12 import typing as t
13
14 import pandas as pd
15 import numpy as np
16 import torch
17 from plotly.express import imshow
18 from queue import PriorityQueue
19 from collections import defaultdict
20
21 from deepchecks.core import CheckResult, DatasetKind
22 from deepchecks.vision import SingleDatasetCheck, Context, Batch
23 from deepchecks.vision.vision_data import TaskType
24 from deepchecks.vision.metrics_utils.iou_utils import jaccard_iou
25
26
27 __all__ = ['ConfusionMatrixReport']
28
29
30 def filter_confusion_matrix(confusion_matrix: pd.DataFrame, number_of_categories: int) -> \
31 t.Tuple[np.ndarray, int]:
32 pq = PriorityQueue()
33 for row, values in enumerate(confusion_matrix):
34 for col, value in enumerate(values):
35 if row != col:
36 pq.put((-value, (row, col)))
37 categories = set()
38 while not pq.empty():
39 if len(categories) >= number_of_categories:
40 break
41 _, (row, col) = pq.get()
42 categories.add(row)
43 categories.add(col)
44 categories = sorted(categories)
45 return confusion_matrix[np.ix_(categories, categories)], categories
46
47
48 class ConfusionMatrixReport(SingleDatasetCheck):
49 """Calculate the confusion matrix of the model on the given dataset.
50
51 For object detection, each detected bounding box calculates the IoU for each label and then is that label class is
52 used for the confusion matrix. detected bounding boxes that don't match a label has their own class and same
53 for labels without detected bounding boxes.
54
55 Parameters
56 ----------
57 categories_to_display (int, default 10):
58 Maximum number of categories to display
59 confidence_threshold (float, default 0.3):
60 Threshold to consider bounding box as detected.
61 iou_threshold (float, default 0.5):
62 Threshold to consider detected bounding box as labeled bounding box.
63 """
64
65 def __init__(self,
66 categories_to_display: int = 10,
67 confidence_threshold: float = 0.3,
68 iou_threshold: float = 0.5,
69 **kwargs):
70 super().__init__(**kwargs)
71 self.confidence_threshold = confidence_threshold
72 self.categories_to_display = categories_to_display
73 self.iou_threshold = iou_threshold
74 self.matrix = None
75 self.classes_list = None
76 self.not_found_idx = None
77 self.unseen_class_idx = None
78 self.task_type = None
79
80 def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):
81 """Initialize run by creating an empty matrix the size of the data."""
82 context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)
83 dataset = context.get_data_by_kind(dataset_kind)
84 self.task_type = dataset.task_type
85 self.matrix = defaultdict(lambda: defaultdict(int))
86
87 def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind = DatasetKind.TRAIN):
88 """Add batch to confusion matrix."""
89 labels = batch.labels
90 predictions = batch.predictions
91 if self.task_type == TaskType.CLASSIFICATION:
92 self.update_classification(predictions, labels)
93 elif self.task_type == TaskType.OBJECT_DETECTION:
94 self.update_object_detection(predictions, labels)
95
96 def compute(self, context: Context, dataset_kind: DatasetKind = None) -> CheckResult:
97 """Compute and plot confusion matrix after all batches were processed."""
98 assert self.matrix is not None
99
100 dataset = context.get_data_by_kind(dataset_kind)
101 matrix = pd.DataFrame(self.matrix).T
102 matrix.replace(np.nan, 0, inplace=True)
103
104 classes = sorted(
105 set(matrix.index).union(set(matrix.columns)),
106 key=lambda x: np.inf if isinstance(x, str) else x
107 )
108
109 matrix = pd.DataFrame(matrix, index=classes, columns=classes).to_numpy()
110
111 confusion_matrix, categories = filter_confusion_matrix(
112 matrix,
113 self.categories_to_display
114 )
115 confusion_matrix = np.nan_to_num(confusion_matrix)
116
117 description = [f'Showing {self.categories_to_display} of {dataset.num_classes} classes:']
118 classes_to_display = []
119 classes_map = dict(enumerate(classes)) # class index -> class label
120
121 for category in categories:
122 category = classes_map[category]
123 if category == 'no-overlapping':
124 description.append(
125 '"No overlapping" categories are labels and prediction which did not have a matching '
126 'label/prediction. For example a predictions that did not have a sufficiently overlapping '
127 'label bounding box will appear under the "No overlapping label" category'
128 )
129 classes_to_display.append('no-overlapping')
130 elif isinstance(category, int):
131 classes_to_display.append(dataset.label_id_to_name(category))
132 else:
133 raise RuntimeError(
134 'Internal Error! categories list must '
135 'contain items of type - Union[int, Literal["no-overlapping"]]'
136 )
137
138 x = []
139 y = []
140
141 for it in classes_to_display:
142 if it != 'no-overlapping':
143 x.append(it)
144 y.append(it)
145 else:
146 x.append('No overlapping prediction')
147 y.append('No overlapping label')
148
149 description.append(
150 imshow(
151 confusion_matrix,
152 x=x,
153 y=y,
154 text_auto=True)
155 .update_layout(width=600, height=600)
156 .update_xaxes(title='Predicted Value', type='category')
157 .update_yaxes(title='True value', type='category')
158 )
159 return CheckResult(
160 matrix,
161 header='Confusion Matrix',
162 display=description
163 )
164
165 def update_object_detection(self, predictions, labels):
166 """Update the confusion matrix by batch for object detection task."""
167 assert self.matrix is not None
168
169 for image_detections, image_labels in zip(predictions, labels):
170 detections_passed_threshold = [
171 detection for detection in image_detections
172 if detection[4] > self.confidence_threshold
173 ]
174
175 if len(detections_passed_threshold) == 0:
176 # detections are empty, update matrix for labels
177 for label in image_labels:
178 label_class = int(label[0].item())
179 self.matrix[label_class]['no-overlapping'] += 1
180 continue
181
182 list_of_ious = (
183 (label_index, detected_index, jaccard_iou(detected, label))
184 for label_index, label in enumerate(image_labels)
185 for detected_index, detected in enumerate(detections_passed_threshold)
186 )
187 matches = np.array([
188 [label_index, detected_index, ious]
189 for label_index, detected_index, ious in list_of_ious
190 if ious > self.iou_threshold
191 ])
192
193 # remove duplicate matches
194 if len(matches) > 0:
195 # sort by ious, in descend order
196 matches = matches[matches[:, 2].argsort()[::-1]]
197 # leave matches with unique prediction and the highest ious
198 matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
199 # sort by ious, in descend order
200 matches = matches[matches[:, 2].argsort()[::-1]]
201 # leave matches with unique label and the highest ious
202 matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
203
204 n_of_matches = len(matches)
205
206 for label_index, label in enumerate(image_labels):
207 label_class = int(label[0])
208 if n_of_matches > 0 and (matches[:, 0] == label_index).any():
209 detection_index = int(matches[matches[:, 0] == label_index, 1][0])
210 detected_class = int(image_detections[detection_index][5])
211 self.matrix[label_class][detected_class] += 1
212 else:
213 self.matrix[label_class]['no-overlapping'] += 1
214
215 for detection_index, detection in enumerate(detections_passed_threshold):
216 if n_of_matches > 0 and not (matches[:, 1] == detection_index).any():
217 detected_class = int(detection[5])
218 self.matrix['no-overlapping'][detected_class] += 1
219
220 def update_classification(self, predictions, labels):
221 """Update the confusion matrix by batch for classification task."""
222 assert self.matrix is not None
223
224 for predicted_classes, image_labels in zip(predictions, labels):
225 detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)
226 label_class = image_labels.item() if isinstance(image_labels, torch.Tensor) else image_labels
227 self.matrix[label_class][detected_class] += 1
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/vision/checks/performance/confusion_matrix.py b/deepchecks/vision/checks/performance/confusion_matrix.py
--- a/deepchecks/vision/checks/performance/confusion_matrix.py
+++ b/deepchecks/vision/checks/performance/confusion_matrix.py
@@ -99,6 +99,7 @@
dataset = context.get_data_by_kind(dataset_kind)
matrix = pd.DataFrame(self.matrix).T
+ matrix = matrix.rename(index={-1: 'no-overlapping'}, columns={-1: 'no-overlapping'})
matrix.replace(np.nan, 0, inplace=True)
classes = sorted(
@@ -176,7 +177,7 @@
# detections are empty, update matrix for labels
for label in image_labels:
label_class = int(label[0].item())
- self.matrix[label_class]['no-overlapping'] += 1
+ self.matrix[label_class][-1] += 1
continue
list_of_ious = (
@@ -210,12 +211,12 @@
detected_class = int(image_detections[detection_index][5])
self.matrix[label_class][detected_class] += 1
else:
- self.matrix[label_class]['no-overlapping'] += 1
+ self.matrix[label_class][-1] += 1
for detection_index, detection in enumerate(detections_passed_threshold):
if n_of_matches > 0 and not (matches[:, 1] == detection_index).any():
detected_class = int(detection[5])
- self.matrix['no-overlapping'][detected_class] += 1
+ self.matrix[-1][detected_class] += 1
def update_classification(self, predictions, labels):
"""Update the confusion matrix by batch for classification task."""
| {"golden_diff": "diff --git a/deepchecks/vision/checks/performance/confusion_matrix.py b/deepchecks/vision/checks/performance/confusion_matrix.py\n--- a/deepchecks/vision/checks/performance/confusion_matrix.py\n+++ b/deepchecks/vision/checks/performance/confusion_matrix.py\n@@ -99,6 +99,7 @@\n \n dataset = context.get_data_by_kind(dataset_kind)\n matrix = pd.DataFrame(self.matrix).T\n+ matrix = matrix.rename(index={-1: 'no-overlapping'}, columns={-1: 'no-overlapping'})\n matrix.replace(np.nan, 0, inplace=True)\n \n classes = sorted(\n@@ -176,7 +177,7 @@\n # detections are empty, update matrix for labels\n for label in image_labels:\n label_class = int(label[0].item())\n- self.matrix[label_class]['no-overlapping'] += 1\n+ self.matrix[label_class][-1] += 1\n continue\n \n list_of_ious = (\n@@ -210,12 +211,12 @@\n detected_class = int(image_detections[detection_index][5])\n self.matrix[label_class][detected_class] += 1\n else:\n- self.matrix[label_class]['no-overlapping'] += 1\n+ self.matrix[label_class][-1] += 1\n \n for detection_index, detection in enumerate(detections_passed_threshold):\n if n_of_matches > 0 and not (matches[:, 1] == detection_index).any():\n detected_class = int(detection[5])\n- self.matrix['no-overlapping'][detected_class] += 1\n+ self.matrix[-1][detected_class] += 1\n \n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n", "issue": "[BUG] [CV] confusion metrix mixed index bug\n**Describe the bug**\r\nConfusion metrix throws error when there are mixed indexes (mixed labels?)(needs to look into bit more)\r\n\r\n**To Reproduce**\r\nRun confusion metrix with the following notebook:\r\nhttps://www.kaggle.com/code/itay94/using-deepchecks-to-validate-ssdlite-model?scriptVersionId=92088847\r\n\r\n**Expected behavior**\r\nCast the str to int or don't support str\r\n\r\n**Screenshots**\r\n\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing confusion matrix report check.\"\"\"\nimport typing as t\n\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom plotly.express import imshow\nfrom queue import PriorityQueue\nfrom collections import defaultdict\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.vision import SingleDatasetCheck, Context, Batch\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import jaccard_iou\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\ndef filter_confusion_matrix(confusion_matrix: pd.DataFrame, number_of_categories: int) -> \\\n t.Tuple[np.ndarray, int]:\n pq = PriorityQueue()\n for row, values in enumerate(confusion_matrix):\n for col, value in enumerate(values):\n if row != col:\n pq.put((-value, (row, col)))\n categories = set()\n while not pq.empty():\n if len(categories) >= number_of_categories:\n break\n _, (row, col) = pq.get()\n categories.add(row)\n categories.add(col)\n categories = sorted(categories)\n return confusion_matrix[np.ix_(categories, categories)], categories\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\n\n For object detection, each detected bounding box calculates the IoU for each label and then is that label class is\n used for the confusion matrix. detected bounding boxes that don't match a label has their own class and same\n for labels without detected bounding boxes.\n\n Parameters\n ----------\n categories_to_display (int, default 10):\n Maximum number of categories to display\n confidence_threshold (float, default 0.3):\n Threshold to consider bounding box as detected.\n iou_threshold (float, default 0.5):\n Threshold to consider detected bounding box as labeled bounding box.\n \"\"\"\n\n def __init__(self,\n categories_to_display: int = 10,\n confidence_threshold: float = 0.3,\n iou_threshold: float = 0.5,\n **kwargs):\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.categories_to_display = categories_to_display\n self.iou_threshold = iou_threshold\n self.matrix = None\n self.classes_list = None\n self.not_found_idx = None\n self.unseen_class_idx = None\n self.task_type = None\n\n def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):\n \"\"\"Initialize run by creating an empty matrix the size of the data.\"\"\"\n context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n dataset = context.get_data_by_kind(dataset_kind)\n self.task_type = dataset.task_type\n self.matrix = defaultdict(lambda: defaultdict(int))\n\n def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind = DatasetKind.TRAIN):\n \"\"\"Add batch to confusion matrix.\"\"\"\n labels = batch.labels\n predictions = batch.predictions\n if self.task_type == TaskType.CLASSIFICATION:\n self.update_classification(predictions, labels)\n elif self.task_type == TaskType.OBJECT_DETECTION:\n self.update_object_detection(predictions, labels)\n\n def compute(self, context: Context, dataset_kind: DatasetKind = None) -> CheckResult:\n \"\"\"Compute and plot confusion matrix after all batches were processed.\"\"\"\n assert self.matrix is not None\n\n dataset = context.get_data_by_kind(dataset_kind)\n matrix = pd.DataFrame(self.matrix).T\n matrix.replace(np.nan, 0, inplace=True)\n\n classes = sorted(\n 
set(matrix.index).union(set(matrix.columns)),\n key=lambda x: np.inf if isinstance(x, str) else x\n )\n\n matrix = pd.DataFrame(matrix, index=classes, columns=classes).to_numpy()\n\n confusion_matrix, categories = filter_confusion_matrix(\n matrix,\n self.categories_to_display\n )\n confusion_matrix = np.nan_to_num(confusion_matrix)\n\n description = [f'Showing {self.categories_to_display} of {dataset.num_classes} classes:']\n classes_to_display = []\n classes_map = dict(enumerate(classes)) # class index -> class label\n\n for category in categories:\n category = classes_map[category]\n if category == 'no-overlapping':\n description.append(\n '\"No overlapping\" categories are labels and prediction which did not have a matching '\n 'label/prediction. For example a predictions that did not have a sufficiently overlapping '\n 'label bounding box will appear under the \"No overlapping label\" category'\n )\n classes_to_display.append('no-overlapping')\n elif isinstance(category, int):\n classes_to_display.append(dataset.label_id_to_name(category))\n else:\n raise RuntimeError(\n 'Internal Error! categories list must '\n 'contain items of type - Union[int, Literal[\"no-overlapping\"]]'\n )\n\n x = []\n y = []\n\n for it in classes_to_display:\n if it != 'no-overlapping':\n x.append(it)\n y.append(it)\n else:\n x.append('No overlapping prediction')\n y.append('No overlapping label')\n\n description.append(\n imshow(\n confusion_matrix,\n x=x,\n y=y,\n text_auto=True)\n .update_layout(width=600, height=600)\n .update_xaxes(title='Predicted Value', type='category')\n .update_yaxes(title='True value', type='category')\n )\n return CheckResult(\n matrix,\n header='Confusion Matrix',\n display=description\n )\n\n def update_object_detection(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for object detection task.\"\"\"\n assert self.matrix is not None\n\n for image_detections, image_labels in zip(predictions, labels):\n detections_passed_threshold = [\n detection for detection in image_detections\n if detection[4] > self.confidence_threshold\n ]\n\n if len(detections_passed_threshold) == 0:\n # detections are empty, update matrix for labels\n for label in image_labels:\n label_class = int(label[0].item())\n self.matrix[label_class]['no-overlapping'] += 1\n continue\n\n list_of_ious = (\n (label_index, detected_index, jaccard_iou(detected, label))\n for label_index, label in enumerate(image_labels)\n for detected_index, detected in enumerate(detections_passed_threshold)\n )\n matches = np.array([\n [label_index, detected_index, ious]\n for label_index, detected_index, ious in list_of_ious\n if ious > self.iou_threshold\n ])\n\n # remove duplicate matches\n if len(matches) > 0:\n # sort by ious, in descend order\n matches = matches[matches[:, 2].argsort()[::-1]]\n # leave matches with unique prediction and the highest ious\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n # sort by ious, in descend order\n matches = matches[matches[:, 2].argsort()[::-1]]\n # leave matches with unique label and the highest ious\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n\n n_of_matches = len(matches)\n\n for label_index, label in enumerate(image_labels):\n label_class = int(label[0])\n if n_of_matches > 0 and (matches[:, 0] == label_index).any():\n detection_index = int(matches[matches[:, 0] == label_index, 1][0])\n detected_class = int(image_detections[detection_index][5])\n self.matrix[label_class][detected_class] += 1\n else:\n 
self.matrix[label_class]['no-overlapping'] += 1\n\n for detection_index, detection in enumerate(detections_passed_threshold):\n if n_of_matches > 0 and not (matches[:, 1] == detection_index).any():\n detected_class = int(detection[5])\n self.matrix['no-overlapping'][detected_class] += 1\n\n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n assert self.matrix is not None\n\n for predicted_classes, image_labels in zip(predictions, labels):\n detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)\n label_class = image_labels.item() if isinstance(image_labels, torch.Tensor) else image_labels\n self.matrix[label_class][detected_class] += 1\n", "path": "deepchecks/vision/checks/performance/confusion_matrix.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing confusion matrix report check.\"\"\"\nimport typing as t\n\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom plotly.express import imshow\nfrom queue import PriorityQueue\nfrom collections import defaultdict\n\nfrom deepchecks.core import CheckResult, DatasetKind\nfrom deepchecks.vision import SingleDatasetCheck, Context, Batch\nfrom deepchecks.vision.vision_data import TaskType\nfrom deepchecks.vision.metrics_utils.iou_utils import jaccard_iou\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\ndef filter_confusion_matrix(confusion_matrix: pd.DataFrame, number_of_categories: int) -> \\\n t.Tuple[np.ndarray, int]:\n pq = PriorityQueue()\n for row, values in enumerate(confusion_matrix):\n for col, value in enumerate(values):\n if row != col:\n pq.put((-value, (row, col)))\n categories = set()\n while not pq.empty():\n if len(categories) >= number_of_categories:\n break\n _, (row, col) = pq.get()\n categories.add(row)\n categories.add(col)\n categories = sorted(categories)\n return confusion_matrix[np.ix_(categories, categories)], categories\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\n\n For object detection, each detected bounding box calculates the IoU for each label and then is that label class is\n used for the confusion matrix. 
detected bounding boxes that don't match a label has their own class and same\n for labels without detected bounding boxes.\n\n Parameters\n ----------\n categories_to_display (int, default 10):\n Maximum number of categories to display\n confidence_threshold (float, default 0.3):\n Threshold to consider bounding box as detected.\n iou_threshold (float, default 0.5):\n Threshold to consider detected bounding box as labeled bounding box.\n \"\"\"\n\n def __init__(self,\n categories_to_display: int = 10,\n confidence_threshold: float = 0.3,\n iou_threshold: float = 0.5,\n **kwargs):\n super().__init__(**kwargs)\n self.confidence_threshold = confidence_threshold\n self.categories_to_display = categories_to_display\n self.iou_threshold = iou_threshold\n self.matrix = None\n self.classes_list = None\n self.not_found_idx = None\n self.unseen_class_idx = None\n self.task_type = None\n\n def initialize_run(self, context: Context, dataset_kind: DatasetKind = None):\n \"\"\"Initialize run by creating an empty matrix the size of the data.\"\"\"\n context.assert_task_type(TaskType.CLASSIFICATION, TaskType.OBJECT_DETECTION)\n dataset = context.get_data_by_kind(dataset_kind)\n self.task_type = dataset.task_type\n self.matrix = defaultdict(lambda: defaultdict(int))\n\n def update(self, context: Context, batch: Batch, dataset_kind: DatasetKind = DatasetKind.TRAIN):\n \"\"\"Add batch to confusion matrix.\"\"\"\n labels = batch.labels\n predictions = batch.predictions\n if self.task_type == TaskType.CLASSIFICATION:\n self.update_classification(predictions, labels)\n elif self.task_type == TaskType.OBJECT_DETECTION:\n self.update_object_detection(predictions, labels)\n\n def compute(self, context: Context, dataset_kind: DatasetKind = None) -> CheckResult:\n \"\"\"Compute and plot confusion matrix after all batches were processed.\"\"\"\n assert self.matrix is not None\n\n dataset = context.get_data_by_kind(dataset_kind)\n matrix = pd.DataFrame(self.matrix).T\n matrix = matrix.rename(index={-1: 'no-overlapping'}, columns={-1: 'no-overlapping'})\n matrix.replace(np.nan, 0, inplace=True)\n\n classes = sorted(\n set(matrix.index).union(set(matrix.columns)),\n key=lambda x: np.inf if isinstance(x, str) else x\n )\n\n matrix = pd.DataFrame(matrix, index=classes, columns=classes).to_numpy()\n\n confusion_matrix, categories = filter_confusion_matrix(\n matrix,\n self.categories_to_display\n )\n confusion_matrix = np.nan_to_num(confusion_matrix)\n\n description = [f'Showing {self.categories_to_display} of {dataset.num_classes} classes:']\n classes_to_display = []\n classes_map = dict(enumerate(classes)) # class index -> class label\n\n for category in categories:\n category = classes_map[category]\n if category == 'no-overlapping':\n description.append(\n '\"No overlapping\" categories are labels and prediction which did not have a matching '\n 'label/prediction. For example a predictions that did not have a sufficiently overlapping '\n 'label bounding box will appear under the \"No overlapping label\" category'\n )\n classes_to_display.append('no-overlapping')\n elif isinstance(category, int):\n classes_to_display.append(dataset.label_id_to_name(category))\n else:\n raise RuntimeError(\n 'Internal Error! 
categories list must '\n 'contain items of type - Union[int, Literal[\"no-overlapping\"]]'\n )\n\n x = []\n y = []\n\n for it in classes_to_display:\n if it != 'no-overlapping':\n x.append(it)\n y.append(it)\n else:\n x.append('No overlapping prediction')\n y.append('No overlapping label')\n\n description.append(\n imshow(\n confusion_matrix,\n x=x,\n y=y,\n text_auto=True)\n .update_layout(width=600, height=600)\n .update_xaxes(title='Predicted Value', type='category')\n .update_yaxes(title='True value', type='category')\n )\n return CheckResult(\n matrix,\n header='Confusion Matrix',\n display=description\n )\n\n def update_object_detection(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for object detection task.\"\"\"\n assert self.matrix is not None\n\n for image_detections, image_labels in zip(predictions, labels):\n detections_passed_threshold = [\n detection for detection in image_detections\n if detection[4] > self.confidence_threshold\n ]\n\n if len(detections_passed_threshold) == 0:\n # detections are empty, update matrix for labels\n for label in image_labels:\n label_class = int(label[0].item())\n self.matrix[label_class][-1] += 1\n continue\n\n list_of_ious = (\n (label_index, detected_index, jaccard_iou(detected, label))\n for label_index, label in enumerate(image_labels)\n for detected_index, detected in enumerate(detections_passed_threshold)\n )\n matches = np.array([\n [label_index, detected_index, ious]\n for label_index, detected_index, ious in list_of_ious\n if ious > self.iou_threshold\n ])\n\n # remove duplicate matches\n if len(matches) > 0:\n # sort by ious, in descend order\n matches = matches[matches[:, 2].argsort()[::-1]]\n # leave matches with unique prediction and the highest ious\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n # sort by ious, in descend order\n matches = matches[matches[:, 2].argsort()[::-1]]\n # leave matches with unique label and the highest ious\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n\n n_of_matches = len(matches)\n\n for label_index, label in enumerate(image_labels):\n label_class = int(label[0])\n if n_of_matches > 0 and (matches[:, 0] == label_index).any():\n detection_index = int(matches[matches[:, 0] == label_index, 1][0])\n detected_class = int(image_detections[detection_index][5])\n self.matrix[label_class][detected_class] += 1\n else:\n self.matrix[label_class][-1] += 1\n\n for detection_index, detection in enumerate(detections_passed_threshold):\n if n_of_matches > 0 and not (matches[:, 1] == detection_index).any():\n detected_class = int(detection[5])\n self.matrix[-1][detected_class] += 1\n\n def update_classification(self, predictions, labels):\n \"\"\"Update the confusion matrix by batch for classification task.\"\"\"\n assert self.matrix is not None\n\n for predicted_classes, image_labels in zip(predictions, labels):\n detected_class = max(range(len(predicted_classes)), key=predicted_classes.__getitem__)\n label_class = image_labels.item() if isinstance(image_labels, torch.Tensor) else image_labels\n self.matrix[label_class][detected_class] += 1\n", "path": "deepchecks/vision/checks/performance/confusion_matrix.py"}]} | 2,970 | 397 |
gh_patches_debug_47843 | rasdani/github-patches | git_diff | holoviz__panel-4047 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make it easy and useful to use the VideoStream
I've never seen any showcases of the `VideoStream` in Panel. As Panel can update relatively quickly via Tornado and websockets it should be the best data app framework for Video. And with `panel convert` even better.
But there are a lot of issues
- There are no inspirational examples out there
- When you try it out you run into several issues
- The VideoStream transfers a high resolution image
- The VideoStream transfers a .png base64 data url. It is slow to work with in Pillow and other frameworks. JPEG is much faster to load and transform
  - You need to be able to convert from the base64 data URL to a `PIL.Image` (for Pillow) and to an `np.ndarray` (for scikit-learn etc.). This might be easy for experienced computer vision people, but for beginners it is hard; a minimal conversion sketch is shown below.
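
A minimal sketch of that conversion step. The helper names are illustrative, not Panel or Pillow API; the only assumption is that the widget hands over a standard `data:image/png;base64,...` string:

```python
import base64
import io

import numpy as np
from PIL import Image


def data_url_to_image(data_url: str) -> Image.Image:
    """Decode a base64 data URL (as sent by the browser) into a PIL Image."""
    _, encoded = data_url.split(",", 1)  # drop the "data:image/png;base64," header
    return Image.open(io.BytesIO(base64.b64decode(encoded)))


def image_to_data_url(image: Image.Image, fmt: str = "JPEG") -> str:
    """Re-encode a PIL Image as a JPEG data URL, which is smaller than PNG."""
    buffer = io.BytesIO()
    image.convert("RGB").save(buffer, format=fmt)
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return f"data:image/{fmt.lower()};base64,{encoded}"


# Round trip with a synthetic frame instead of a real camera snapshot.
frame = Image.fromarray(np.zeros((48, 64, 3), dtype=np.uint8))
buffer = io.BytesIO()
frame.save(buffer, format="PNG")
png_url = "data:image/png;base64," + base64.b64encode(buffer.getvalue()).decode("utf-8")

image = data_url_to_image(png_url)
array = np.asarray(image)            # np.ndarray for scikit-image / scikit-learn style processing
jpeg_url = image_to_data_url(image)  # smaller payload to push back to the browser
```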
To make it easier for Panel users to get started and create something useful. I propose we add an example to the Gallery that people can use for inspiration. We can use the example to identify and solve any performance issues.
The example should end up being easy to understand and use + super performant.
## Additional Context
[Yuichiro](https://twitter.com/whitphx) has done some really amazing work to make this possible in Streamlit. We should be able to find inspiration and make something at least as awesome. See https://edit.share.stlite.net/?sampleAppId=realtime_image_processing
[Tweet by @whitphx](https://twitter.com/whitphx/status/1583708572063772675?s=20&t=lZLbNEIl7LQpVWu1RyNEiQ)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import json
3 import os
4 import shutil
5 import sys
6
7 import pyct.build
8
9 from setuptools import find_packages, setup
10 from setuptools.command.develop import develop
11 from setuptools.command.install import install
12 from setuptools.command.sdist import sdist
13
14 PANEL_LITE_BUILD = 'PANEL_LITE' in os.environ
15
16
17 def get_setup_version(reponame):
18 """
19 Helper to get the current version from either git describe or the
20 .version file (if available).
21 """
22 basepath = os.path.split(__file__)[0]
23 version_file_path = os.path.join(basepath, reponame, '.version')
24 try:
25 from param import version
26 except Exception:
27 version = None
28 if version is not None:
29 return version.Version.setup_version(basepath, reponame, archive_commit="$Format:%h$")
30 else:
31 print("WARNING: param>=1.6.0 unavailable. If you are installing a package, "
32 "this warning can safely be ignored. If you are creating a package or "
33 "otherwise operating in a git repository, you should install param>=1.6.0.")
34 return json.load(open(version_file_path, 'r'))['version_string']
35
36
37 def _build_paneljs():
38 from bokeh.ext import build
39
40 from panel.compiler import bundle_resources
41 print("Building custom models:")
42 panel_dir = os.path.join(os.path.dirname(__file__), "panel")
43 build(panel_dir)
44 print("Bundling custom model resources:")
45 bundle_resources()
46 if sys.platform != "win32":
47 # npm can cause non-blocking stdout; so reset it just in case
48 import fcntl
49 flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)
50 fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK)
51
52
53 class CustomDevelopCommand(develop):
54 """Custom installation for development mode."""
55
56 def run(self):
57 if not PANEL_LITE_BUILD:
58 _build_paneljs()
59 develop.run(self)
60
61
62 class CustomInstallCommand(install):
63 """Custom installation for install mode."""
64
65 def run(self):
66 if not PANEL_LITE_BUILD:
67 _build_paneljs()
68 install.run(self)
69
70
71 class CustomSdistCommand(sdist):
72 """Custom installation for sdist mode."""
73
74 def run(self):
75 if not PANEL_LITE_BUILD:
76 _build_paneljs()
77 sdist.run(self)
78
79
80 _COMMANDS = {
81 'develop': CustomDevelopCommand,
82 'install': CustomInstallCommand,
83 'sdist': CustomSdistCommand,
84 }
85
86 try:
87 from wheel.bdist_wheel import bdist_wheel
88
89 class CustomBdistWheelCommand(bdist_wheel):
90 """Custom bdist_wheel command to force cancelling qiskit-terra wheel
91 creation."""
92
93 def run(self):
94 """Do nothing so the command intentionally fails."""
95 if not PANEL_LITE_BUILD:
96 _build_paneljs()
97 bdist_wheel.run(self)
98
99 _COMMANDS['bdist_wheel'] = CustomBdistWheelCommand
100 except Exception:
101 pass
102
103 ########## dependencies ##########
104
105 install_requires = [
106 'bokeh >=2.4.0,<2.5.0',
107 'param >=1.12.0',
108 'pyviz_comms >=0.7.4',
109 'markdown',
110 'requests',
111 'tqdm >=4.48.0',
112 'pyct >=0.4.4',
113 'bleach',
114 'setuptools >=42',
115 'typing_extensions',
116 ]
117
118 _recommended = [
119 'jupyterlab',
120 'holoviews >1.14.1',
121 'matplotlib',
122 'pillow',
123 'plotly'
124 ]
125
126 _tests = [
127 # Test dependencies
128 'flake8',
129 'parameterized',
130 'pytest',
131 'nbval',
132 'flaky',
133 'pytest-xdist',
134 'pytest-cov',
135 'pre-commit',
136 'psutil',
137 # Libraries tested in unit tests
138 'folium',
139 'ipympl',
140 'scipy',
141 'twine',
142 'pandas >=1.3',
143 'ipython >=7.0',
144 'holoviews',
145 'diskcache',
146 'markdown-it-py',
147 'ipyvuetify',
148 'reacton',
149 # Added lxml temporarily as installing pyechars or idom on Python 3.11
150 # via pip tries to build it and fails. To be removed.
151 'lxml',
152 'numpy <1.24', # Avoid VTK test fail
153 ]
154
155 _ui = [
156 'playwright',
157 'pytest-playwright'
158 ]
159
160 extras_require = {
161 'examples': [
162 'hvplot',
163 'plotly >=4.0',
164 'altair',
165 'streamz',
166 'vega_datasets',
167 'vtk ==9.0.1',
168 'scikit-learn',
169 'datashader',
170 'jupyter_bokeh >=3.0.2',
171 'django <4',
172 'channels',
173 'pyvista<0.33',
174 'ipywidgets',
175 'ipywidgets_bokeh',
176 'ipyvolume',
177 'ipyleaflet',
178 'ipympl',
179 'folium',
180 'xarray',
181 'pyinstrument >=4.0',
182 'aiohttp',
183 'croniter',
184 'graphviz',
185 'networkx >=2.5',
186 'pygraphviz',
187 'seaborn',
188 'pydeck',
189 'graphviz',
190 'lxml',
191 'python-graphviz',
192 'xgboost',
193 'ipyvuetify',
194 'reacton'
195 ],
196 'tests': _tests,
197 'recommended': _recommended,
198 'doc': _recommended + [
199 'nbsite >=0.7.2rc2',
200 'pydata-sphinx-theme <=0.9.0',
201 'sphinx-copybutton',
202 'sphinx-design',
203 ],
204 'ui': _ui
205 }
206
207 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
208
209 # Superset of what's in pyproject.toml (includes non-python
210 # dependencies). Also, pyproject.toml isn't supported by all tools
211 # anyway (e.g. older versions of pip, or conda - which also supports
212 # non-python dependencies). Note that setup_requires isn't used
213 # because it doesn't work well with pip.
214 extras_require['build'] = [
215 'param >=1.9.2',
216 'pyct >=0.4.4',
217 'setuptools >=42',
218 'bokeh >=2.4.3,<2.5.0',
219 'pyviz_comms >=0.7.4',
220 'requests',
221 'packaging',
222 'bleach',
223 'tqdm >=4.48.0',
224 ]
225
226 setup_args = dict(
227 name='panel',
228 version=get_setup_version("panel"),
229 description='A high level app and dashboarding solution for Python.',
230 long_description=open('README.md').read() if os.path.isfile('README.md') else 'Consult README.md',
231 long_description_content_type="text/markdown",
232 author="HoloViz",
233 author_email="[email protected]",
234 maintainer="HoloViz",
235 maintainer_email="[email protected]",
236 platforms=['Windows', 'Mac OS X', 'Linux'],
237 license='BSD',
238 url='http://panel.holoviz.org',
239 project_urls={
240 'Source': 'https://github.com/holoviz/panel',
241 },
242 cmdclass=_COMMANDS,
243 packages=find_packages(),
244 include_package_data=True,
245 data_files=[
246 # like `jupyter serverextension enable --sys-prefix`
247 (
248 "etc/jupyter/jupyter_notebook_config.d",
249 ["jupyter-config/jupyter_notebook_config.d/panel-client-jupyter.json"],
250 ),
251 # like `jupyter server extension enable --sys-prefix`
252 (
253 "etc/jupyter/jupyter_server_config.d",
254 ["jupyter-config/jupyter_server_config.d/panel-client-jupyter.json"],
255 ),
256 ],
257 classifiers=[
258 "License :: OSI Approved :: BSD License",
259 "Development Status :: 5 - Production/Stable",
260 "Programming Language :: Python :: 3",
261 "Programming Language :: Python :: 3.7",
262 "Programming Language :: Python :: 3.8",
263 "Programming Language :: Python :: 3.9",
264 "Programming Language :: Python :: 3.10",
265 "Programming Language :: Python :: 3.11",
266 "Operating System :: OS Independent",
267 "Intended Audience :: Developers",
268 "Intended Audience :: Science/Research",
269 "Intended Audience :: Financial and Insurance Industry",
270 "Intended Audience :: Healthcare Industry",
271 "Intended Audience :: Information Technology",
272 "Intended Audience :: Legal Industry",
273 "Intended Audience :: Other Audience",
274 "Intended Audience :: Science/Research",
275 "Natural Language :: English",
276 "Topic :: Scientific/Engineering",
277 "Topic :: Scientific/Engineering :: Visualization",
278 "Topic :: Scientific/Engineering :: Information Analysis",
279 "Topic :: Office/Business",
280 "Topic :: Office/Business :: Financial",
281 "Topic :: Software Development :: Libraries"],
282 python_requires=">=3.7",
283 entry_points={
284 'console_scripts': [
285 'panel = panel.command:main'
286 ]
287 },
288 install_requires=install_requires,
289 extras_require=extras_require,
290 tests_require=extras_require['tests']
291 )
292
293 def clean_js_version(version):
294 version = version.replace('-', '')
295 for dev in ('a', 'b', 'rc'):
296 version = version.replace(dev+'.', dev)
297 return version
298
299 if __name__ == "__main__":
300 example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
301 'panel', 'examples')
302
303 if 'develop' not in sys.argv and 'egg_info' not in sys.argv:
304 pyct.build.examples(example_path, __file__, force=True)
305
306 version = setup_args['version']
307 if 'post' not in version:
308 with open('./panel/package.json') as f:
309 package_json = json.load(f)
310 js_version = package_json['version']
311 version = version.split('+')[0]
312 if any(dev in version for dev in ('a', 'b', 'rc')) and not '-' in js_version:
313 raise ValueError(f"panel.js dev versions ({js_version}) must "
314 "must separate dev suffix with a dash, e.g. "
315 "v1.0.0rc1 should be v1.0.0-rc.1.")
316 if version != 'None' and version != clean_js_version(js_version):
317 raise ValueError(f"panel.js version ({js_version}) does not match "
318 f"panel version ({version}). Cannot build release.")
319
320 setup(**setup_args)
321
322 if os.path.isdir(example_path):
323 shutil.rmtree(example_path)
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -191,7 +191,8 @@
'python-graphviz',
'xgboost',
'ipyvuetify',
- 'reacton'
+ 'reacton',
+ 'scikit-image',
],
'tests': _tests,
'recommended': _recommended,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -191,7 +191,8 @@\n 'python-graphviz',\n 'xgboost',\n 'ipyvuetify',\n- 'reacton'\n+ 'reacton',\n+ 'scikit-image',\n ],\n 'tests': _tests,\n 'recommended': _recommended,\n", "issue": "Make it easy and useful to use the VideoStream\nI've never seen any showcases of the `VideoStream` in Panel. As Panel can update relatively quickly via Tornado and websockets it should be the best data app framework for Video. And with `panel convert` even better.\r\n\r\nBut there are lot of issues\r\n\r\n- There are no inspirational examples out there\r\n- When you try out you run into several issues\r\n - The VideoStream transfers a high resolution image\r\n - The VideoStream transfers a .png base64 data url. It is slow to work with in Pillow and other frameworks. JPEG is much faster to load and transform\r\n - You need to be able to convert from base64 to PIL.Image for using Pillow and np.ndarray for using scikit-learn etc. This might be easy for experienced data vision people. But for beginners this is so hard.\r\n\r\nTo make it easier for Panel users to get started and create something useful. I propose we add an example to the Gallery that people can use for inspiration. We can use the example to identify and solve any performance issues.\r\n\r\nThe example should end up being easy to understand and use + super performant.\r\n\r\n## Additional Context\r\n\r\n[Yuichiro](https://twitter.com/whitphx) has done some really amazing work to make this possible in Streamlit. We should be able to find inspiration and make something at least as awesome. See https://edit.share.stlite.net/?sampleAppId=realtime_image_processing\r\n\r\n[](https://twitter.com/whitphx/status/1583708572063772675?s=20&t=lZLbNEIl7LQpVWu1RyNEiQ)\n", "before_files": [{"content": "#!/usr/bin/env python\nimport json\nimport os\nimport shutil\nimport sys\n\nimport pyct.build\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom setuptools.command.sdist import sdist\n\nPANEL_LITE_BUILD = 'PANEL_LITE' in os.environ\n\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except Exception:\n version = None\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, \"\n \"this warning can safely be ignored. 
If you are creating a package or \"\n \"otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path, 'r'))['version_string']\n\n\ndef _build_paneljs():\n from bokeh.ext import build\n\n from panel.compiler import bundle_resources\n print(\"Building custom models:\")\n panel_dir = os.path.join(os.path.dirname(__file__), \"panel\")\n build(panel_dir)\n print(\"Bundling custom model resources:\")\n bundle_resources()\n if sys.platform != \"win32\":\n # npm can cause non-blocking stdout; so reset it just in case\n import fcntl\n flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)\n fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK)\n\n\nclass CustomDevelopCommand(develop):\n \"\"\"Custom installation for development mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n develop.run(self)\n\n\nclass CustomInstallCommand(install):\n \"\"\"Custom installation for install mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n install.run(self)\n\n\nclass CustomSdistCommand(sdist):\n \"\"\"Custom installation for sdist mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n sdist.run(self)\n\n\n_COMMANDS = {\n 'develop': CustomDevelopCommand,\n 'install': CustomInstallCommand,\n 'sdist': CustomSdistCommand,\n}\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n class CustomBdistWheelCommand(bdist_wheel):\n \"\"\"Custom bdist_wheel command to force cancelling qiskit-terra wheel\n creation.\"\"\"\n\n def run(self):\n \"\"\"Do nothing so the command intentionally fails.\"\"\"\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n bdist_wheel.run(self)\n\n _COMMANDS['bdist_wheel'] = CustomBdistWheelCommand\nexcept Exception:\n pass\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=2.4.0,<2.5.0',\n 'param >=1.12.0',\n 'pyviz_comms >=0.7.4',\n 'markdown',\n 'requests',\n 'tqdm >=4.48.0',\n 'pyct >=0.4.4',\n 'bleach',\n 'setuptools >=42',\n 'typing_extensions',\n]\n\n_recommended = [\n 'jupyterlab',\n 'holoviews >1.14.1',\n 'matplotlib',\n 'pillow',\n 'plotly'\n]\n\n_tests = [\n # Test dependencies\n 'flake8',\n 'parameterized',\n 'pytest',\n 'nbval',\n 'flaky',\n 'pytest-xdist',\n 'pytest-cov',\n 'pre-commit',\n 'psutil',\n # Libraries tested in unit tests\n 'folium',\n 'ipympl',\n 'scipy',\n 'twine',\n 'pandas >=1.3',\n 'ipython >=7.0',\n 'holoviews',\n 'diskcache',\n 'markdown-it-py',\n 'ipyvuetify',\n 'reacton',\n # Added lxml temporarily as installing pyechars or idom on Python 3.11\n # via pip tries to build it and fails. 
To be removed.\n 'lxml',\n 'numpy <1.24', # Avoid VTK test fail\n]\n\n_ui = [\n 'playwright',\n 'pytest-playwright'\n]\n\nextras_require = {\n 'examples': [\n 'hvplot',\n 'plotly >=4.0',\n 'altair',\n 'streamz',\n 'vega_datasets',\n 'vtk ==9.0.1',\n 'scikit-learn',\n 'datashader',\n 'jupyter_bokeh >=3.0.2',\n 'django <4',\n 'channels',\n 'pyvista<0.33',\n 'ipywidgets',\n 'ipywidgets_bokeh',\n 'ipyvolume',\n 'ipyleaflet',\n 'ipympl',\n 'folium',\n 'xarray',\n 'pyinstrument >=4.0',\n 'aiohttp',\n 'croniter',\n 'graphviz',\n 'networkx >=2.5',\n 'pygraphviz',\n 'seaborn',\n 'pydeck',\n 'graphviz',\n 'lxml',\n 'python-graphviz',\n 'xgboost',\n 'ipyvuetify',\n 'reacton'\n ],\n 'tests': _tests,\n 'recommended': _recommended,\n 'doc': _recommended + [\n 'nbsite >=0.7.2rc2',\n 'pydata-sphinx-theme <=0.9.0',\n 'sphinx-copybutton',\n 'sphinx-design',\n ],\n 'ui': _ui\n}\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n# Superset of what's in pyproject.toml (includes non-python\n# dependencies). Also, pyproject.toml isn't supported by all tools\n# anyway (e.g. older versions of pip, or conda - which also supports\n# non-python dependencies). Note that setup_requires isn't used\n# because it doesn't work well with pip.\nextras_require['build'] = [\n 'param >=1.9.2',\n 'pyct >=0.4.4',\n 'setuptools >=42',\n 'bokeh >=2.4.3,<2.5.0',\n 'pyviz_comms >=0.7.4',\n 'requests',\n 'packaging',\n 'bleach',\n 'tqdm >=4.48.0',\n]\n\nsetup_args = dict(\n name='panel',\n version=get_setup_version(\"panel\"),\n description='A high level app and dashboarding solution for Python.',\n long_description=open('README.md').read() if os.path.isfile('README.md') else 'Consult README.md',\n long_description_content_type=\"text/markdown\",\n author=\"HoloViz\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz\",\n maintainer_email=\"[email protected]\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='http://panel.holoviz.org',\n project_urls={\n 'Source': 'https://github.com/holoviz/panel',\n },\n cmdclass=_COMMANDS,\n packages=find_packages(),\n include_package_data=True,\n data_files=[\n # like `jupyter serverextension enable --sys-prefix`\n (\n \"etc/jupyter/jupyter_notebook_config.d\",\n [\"jupyter-config/jupyter_notebook_config.d/panel-client-jupyter.json\"],\n ),\n # like `jupyter server extension enable --sys-prefix`\n (\n \"etc/jupyter/jupyter_server_config.d\",\n [\"jupyter-config/jupyter_server_config.d/panel-client-jupyter.json\"],\n ),\n ],\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Legal Industry\",\n \"Intended Audience :: Other Audience\",\n \"Intended Audience :: Science/Research\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Office/Business\",\n \"Topic :: 
Office/Business :: Financial\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=3.7\",\n entry_points={\n 'console_scripts': [\n 'panel = panel.command:main'\n ]\n },\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests']\n)\n\ndef clean_js_version(version):\n version = version.replace('-', '')\n for dev in ('a', 'b', 'rc'):\n version = version.replace(dev+'.', dev)\n return version\n\nif __name__ == \"__main__\":\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'panel', 'examples')\n\n if 'develop' not in sys.argv and 'egg_info' not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n version = setup_args['version']\n if 'post' not in version:\n with open('./panel/package.json') as f:\n package_json = json.load(f)\n js_version = package_json['version']\n version = version.split('+')[0]\n if any(dev in version for dev in ('a', 'b', 'rc')) and not '-' in js_version:\n raise ValueError(f\"panel.js dev versions ({js_version}) must \"\n \"must separate dev suffix with a dash, e.g. \"\n \"v1.0.0rc1 should be v1.0.0-rc.1.\")\n if version != 'None' and version != clean_js_version(js_version):\n raise ValueError(f\"panel.js version ({js_version}) does not match \"\n f\"panel version ({version}). Cannot build release.\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport json\nimport os\nimport shutil\nimport sys\n\nimport pyct.build\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\nfrom setuptools.command.sdist import sdist\n\nPANEL_LITE_BUILD = 'PANEL_LITE' in os.environ\n\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except Exception:\n version = None\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, \"\n \"this warning can safely be ignored. 
If you are creating a package or \"\n \"otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path, 'r'))['version_string']\n\n\ndef _build_paneljs():\n from bokeh.ext import build\n\n from panel.compiler import bundle_resources\n print(\"Building custom models:\")\n panel_dir = os.path.join(os.path.dirname(__file__), \"panel\")\n build(panel_dir)\n print(\"Bundling custom model resources:\")\n bundle_resources()\n if sys.platform != \"win32\":\n # npm can cause non-blocking stdout; so reset it just in case\n import fcntl\n flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL)\n fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK)\n\n\nclass CustomDevelopCommand(develop):\n \"\"\"Custom installation for development mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n develop.run(self)\n\n\nclass CustomInstallCommand(install):\n \"\"\"Custom installation for install mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n install.run(self)\n\n\nclass CustomSdistCommand(sdist):\n \"\"\"Custom installation for sdist mode.\"\"\"\n\n def run(self):\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n sdist.run(self)\n\n\n_COMMANDS = {\n 'develop': CustomDevelopCommand,\n 'install': CustomInstallCommand,\n 'sdist': CustomSdistCommand,\n}\n\ntry:\n from wheel.bdist_wheel import bdist_wheel\n\n class CustomBdistWheelCommand(bdist_wheel):\n \"\"\"Custom bdist_wheel command to force cancelling qiskit-terra wheel\n creation.\"\"\"\n\n def run(self):\n \"\"\"Do nothing so the command intentionally fails.\"\"\"\n if not PANEL_LITE_BUILD:\n _build_paneljs()\n bdist_wheel.run(self)\n\n _COMMANDS['bdist_wheel'] = CustomBdistWheelCommand\nexcept Exception:\n pass\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=2.4.0,<2.5.0',\n 'param >=1.12.0',\n 'pyviz_comms >=0.7.4',\n 'markdown',\n 'requests',\n 'tqdm >=4.48.0',\n 'pyct >=0.4.4',\n 'bleach',\n 'setuptools >=42',\n 'typing_extensions',\n]\n\n_recommended = [\n 'jupyterlab',\n 'holoviews >1.14.1',\n 'matplotlib',\n 'pillow',\n 'plotly'\n]\n\n_tests = [\n # Test dependencies\n 'flake8',\n 'parameterized',\n 'pytest',\n 'nbval',\n 'flaky',\n 'pytest-xdist',\n 'pytest-cov',\n 'pre-commit',\n 'psutil',\n # Libraries tested in unit tests\n 'folium',\n 'ipympl',\n 'scipy',\n 'twine',\n 'pandas >=1.3',\n 'ipython >=7.0',\n 'holoviews',\n 'diskcache',\n 'markdown-it-py',\n 'ipyvuetify',\n 'reacton',\n # Added lxml temporarily as installing pyechars or idom on Python 3.11\n # via pip tries to build it and fails. 
To be removed.\n 'lxml',\n 'numpy <1.24', # Avoid VTK test fail\n]\n\n_ui = [\n 'playwright',\n 'pytest-playwright'\n]\n\nextras_require = {\n 'examples': [\n 'hvplot',\n 'plotly >=4.0',\n 'altair',\n 'streamz',\n 'vega_datasets',\n 'vtk ==9.0.1',\n 'scikit-learn',\n 'datashader',\n 'jupyter_bokeh >=3.0.2',\n 'django <4',\n 'channels',\n 'pyvista<0.33',\n 'ipywidgets',\n 'ipywidgets_bokeh',\n 'ipyvolume',\n 'ipyleaflet',\n 'ipympl',\n 'folium',\n 'xarray',\n 'pyinstrument >=4.0',\n 'aiohttp',\n 'croniter',\n 'graphviz',\n 'networkx >=2.5',\n 'pygraphviz',\n 'seaborn',\n 'pydeck',\n 'graphviz',\n 'lxml',\n 'python-graphviz',\n 'xgboost',\n 'ipyvuetify',\n 'reacton',\n 'scikit-image',\n ],\n 'tests': _tests,\n 'recommended': _recommended,\n 'doc': _recommended + [\n 'nbsite >=0.7.2rc2',\n 'pydata-sphinx-theme <=0.9.0',\n 'sphinx-copybutton',\n 'sphinx-design',\n ],\n 'ui': _ui\n}\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n# Superset of what's in pyproject.toml (includes non-python\n# dependencies). Also, pyproject.toml isn't supported by all tools\n# anyway (e.g. older versions of pip, or conda - which also supports\n# non-python dependencies). Note that setup_requires isn't used\n# because it doesn't work well with pip.\nextras_require['build'] = [\n 'param >=1.9.2',\n 'pyct >=0.4.4',\n 'setuptools >=42',\n 'bokeh >=2.4.3,<2.5.0',\n 'pyviz_comms >=0.7.4',\n 'requests',\n 'packaging',\n 'bleach',\n 'tqdm >=4.48.0',\n]\n\nsetup_args = dict(\n name='panel',\n version=get_setup_version(\"panel\"),\n description='A high level app and dashboarding solution for Python.',\n long_description=open('README.md').read() if os.path.isfile('README.md') else 'Consult README.md',\n long_description_content_type=\"text/markdown\",\n author=\"HoloViz\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz\",\n maintainer_email=\"[email protected]\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='http://panel.holoviz.org',\n project_urls={\n 'Source': 'https://github.com/holoviz/panel',\n },\n cmdclass=_COMMANDS,\n packages=find_packages(),\n include_package_data=True,\n data_files=[\n # like `jupyter serverextension enable --sys-prefix`\n (\n \"etc/jupyter/jupyter_notebook_config.d\",\n [\"jupyter-config/jupyter_notebook_config.d/panel-client-jupyter.json\"],\n ),\n # like `jupyter server extension enable --sys-prefix`\n (\n \"etc/jupyter/jupyter_server_config.d\",\n [\"jupyter-config/jupyter_server_config.d/panel-client-jupyter.json\"],\n ),\n ],\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Financial and Insurance Industry\",\n \"Intended Audience :: Healthcare Industry\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Legal Industry\",\n \"Intended Audience :: Other Audience\",\n \"Intended Audience :: Science/Research\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: 
Office/Business\",\n \"Topic :: Office/Business :: Financial\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=3.7\",\n entry_points={\n 'console_scripts': [\n 'panel = panel.command:main'\n ]\n },\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests']\n)\n\ndef clean_js_version(version):\n version = version.replace('-', '')\n for dev in ('a', 'b', 'rc'):\n version = version.replace(dev+'.', dev)\n return version\n\nif __name__ == \"__main__\":\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'panel', 'examples')\n\n if 'develop' not in sys.argv and 'egg_info' not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n version = setup_args['version']\n if 'post' not in version:\n with open('./panel/package.json') as f:\n package_json = json.load(f)\n js_version = package_json['version']\n version = version.split('+')[0]\n if any(dev in version for dev in ('a', 'b', 'rc')) and not '-' in js_version:\n raise ValueError(f\"panel.js dev versions ({js_version}) must \"\n \"must separate dev suffix with a dash, e.g. \"\n \"v1.0.0rc1 should be v1.0.0-rc.1.\")\n if version != 'None' and version != clean_js_version(js_version):\n raise ValueError(f\"panel.js version ({js_version}) does not match \"\n f\"panel version ({version}). Cannot build release.\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}]} | 3,934 | 88 |
gh_patches_debug_15614 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] inplace operation in RBFKernelGrad when diag=True
# 🐛 Bug
RBFKernelGrad uses an inplace operation when diag=True that breaks backward.
## To reproduce
```python
import torch
from gpytorch.kernels import RBFKernelGrad
# This works
k = RBFKernelGrad()
train_x=torch.tensor([1., 2., 3.])
z = k(train_x, train_x)
z[0, 0].backward()
print(k.raw_lengthscale.grad)
# This fails
z = k(train_x, train_x, diag=True)
z[0].backward()
print(k.raw_lengthscale.grad)
```
** Stack trace/error message **
```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-17-ecb6525006fe> in <module>
1 z = k(train_x, train_x, diag=True)
----> 2 z[0].backward()
3 print(k.raw_lengthscale.grad)
/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
231 create_graph=create_graph,
232 inputs=inputs)
--> 233 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
234
235 def register_hook(self, hook):
/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
144 Variable._execution_engine.run_backward(
145 tensors, grad_tensors_, retain_graph, create_graph, inputs,
--> 146 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
147
148
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [1, 1]], which is output 0 of SoftplusBackward, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
```
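
The traceback points at a softplus output (the constrained `lengthscale`) whose version counter was bumped by an in-place `pow_`. As a minimal sketch, independent of GPyTorch, the same failure mode can be reproduced with `torch.exp`, which saves its output for the backward pass:

```python
import torch

x = torch.tensor([2.0], requires_grad=True)

# Out-of-place power: exp's saved output is untouched, so backward succeeds.
y = torch.exp(x)
(y.pow(2)).sum().backward()
print(x.grad)  # tensor([109.1963]) == 2 * exp(2 * 2)

# In-place power: bumps the version counter of the tensor exp saved for backward.
x.grad = None
y = torch.exp(x)
y.pow_(2)
try:
    y.sum().backward()
except RuntimeError as err:
    print(err)  # "... has been modified by an inplace operation ..."
```

The golden patch shown further below makes the matching one-character change in `rbf_kernel_grad.py`, replacing `self.lengthscale.pow_(2)` with the out-of-place `self.lengthscale.pow(2)`.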
## Expected Behavior
Same as what happens without `diag=True`.
## System information
**Please complete the following information:**
- GPyTorch Version 1.3.0
- PyTorch Version 1.7.1
- Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/rbf_kernel_grad.py`
Content:
```
1 #!/usr/bin/env python3
2 import torch
3
4 from ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor
5 from .rbf_kernel import RBFKernel, postprocess_rbf
6
7
8 class RBFKernelGrad(RBFKernel):
9 r"""
10 Computes a covariance matrix of the RBF kernel that models the covariance
11 between the values and partial derivatives for inputs :math:`\mathbf{x_1}`
12 and :math:`\mathbf{x_2}`.
13
14 See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.
15
16 .. note::
17
18 This kernel does not have an `outputscale` parameter. To add a scaling parameter,
19 decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
20
21 Args:
22 :attr:`batch_shape` (torch.Size, optional):
23 Set this if you want a separate lengthscale for each
24 batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
25 :attr:`active_dims` (tuple of ints, optional):
26 Set this if you want to compute the covariance of only a few input dimensions. The ints
27 corresponds to the indices of the dimensions. Default: `None`.
28 :attr:`lengthscale_prior` (Prior, optional):
29 Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
30 :attr:`lengthscale_constraint` (Constraint, optional):
31 Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.
32 :attr:`eps` (float):
33 The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.
34
35 Attributes:
36 :attr:`lengthscale` (Tensor):
37 The lengthscale parameter. Size/shape of parameter depends on the
38 :attr:`ard_num_dims` and :attr:`batch_shape` arguments.
39
40 Example:
41 >>> x = torch.randn(10, 5)
42 >>> # Non-batch: Simple option
43 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
44 >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)
45 >>>
46 >>> batch_x = torch.randn(2, 10, 5)
47 >>> # Batch: Simple option
48 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
49 >>> # Batch: different lengthscale for each batch
50 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))
51 >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)
52 """
53
54 def forward(self, x1, x2, diag=False, **params):
55 batch_shape = x1.shape[:-2]
56 n_batch_dims = len(batch_shape)
57 n1, d = x1.shape[-2:]
58 n2 = x2.shape[-2]
59
60 K = torch.zeros(*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype)
61
62 if not diag:
63 # Scale the inputs by the lengthscale (for stability)
64 x1_ = x1.div(self.lengthscale)
65 x2_ = x2.div(self.lengthscale)
66
67 # Form all possible rank-1 products for the gradient and Hessian blocks
68 outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)
69 outer = outer / self.lengthscale.unsqueeze(-2)
70 outer = torch.transpose(outer, -1, -2).contiguous()
71
72 # 1) Kernel block
73 diff = self.covar_dist(x1_, x2_, square_dist=True, dist_postprocess_func=postprocess_rbf, **params)
74 K_11 = diff
75 K[..., :n1, :n2] = K_11
76
77 # 2) First gradient block
78 outer1 = outer.view(*batch_shape, n1, n2 * d)
79 K[..., :n1, n2:] = outer1 * K_11.repeat([*([1] * (n_batch_dims + 1)), d])
80
81 # 3) Second gradient block
82 outer2 = outer.transpose(-1, -3).reshape(*batch_shape, n2, n1 * d)
83 outer2 = outer2.transpose(-1, -2)
84 K[..., n1:, :n2] = -outer2 * K_11.repeat([*([1] * n_batch_dims), d, 1])
85
86 # 4) Hessian block
87 outer3 = outer1.repeat([*([1] * n_batch_dims), d, 1]) * outer2.repeat([*([1] * (n_batch_dims + 1)), d])
88 kp = KroneckerProductLazyTensor(
89 torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1) / self.lengthscale.pow(2),
90 torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1),
91 )
92 chain_rule = kp.evaluate() - outer3
93 K[..., n1:, n2:] = chain_rule * K_11.repeat([*([1] * n_batch_dims), d, d])
94
95 # Symmetrize for stability
96 if n1 == n2 and torch.eq(x1, x2).all():
97 K = 0.5 * (K.transpose(-1, -2) + K)
98
99 # Apply a perfect shuffle permutation to match the MutiTask ordering
100 pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().reshape((n1 * (d + 1)))
101 pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
102 K = K[..., pi1, :][..., :, pi2]
103
104 return K
105
106 else:
107 if not (n1 == n2 and torch.eq(x1, x2).all()):
108 raise RuntimeError("diag=True only works when x1 == x2")
109
110 kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
111 grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)
112 grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)
113 k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
114 pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
115 return k_diag[..., pi]
116
117 def num_outputs_per_input(self, x1, x2):
118 return x1.size(-1) + 1
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/kernels/rbf_kernel_grad.py b/gpytorch/kernels/rbf_kernel_grad.py
--- a/gpytorch/kernels/rbf_kernel_grad.py
+++ b/gpytorch/kernels/rbf_kernel_grad.py
@@ -108,7 +108,7 @@
raise RuntimeError("diag=True only works when x1 == x2")
kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
- grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)
+ grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow(2)
grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)
k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))
| {"golden_diff": "diff --git a/gpytorch/kernels/rbf_kernel_grad.py b/gpytorch/kernels/rbf_kernel_grad.py\n--- a/gpytorch/kernels/rbf_kernel_grad.py\n+++ b/gpytorch/kernels/rbf_kernel_grad.py\n@@ -108,7 +108,7 @@\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n \n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n- grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)\n+ grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow(2)\n grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))\n", "issue": "[Bug] inplace operation in RBFKernelGrad when diag=True\n# \ud83d\udc1b Bug\r\n\r\nRBFKernelGrad uses an inplace operation when diag=True that breaks backward.\r\n\r\n## To reproduce\r\n\r\n```python\r\nimport torch\r\nfrom gpytorch.kernels import RBFKernelGrad\r\n\r\n# This works\r\nk = RBFKernelGrad()\r\ntrain_x=torch.tensor([1., 2., 3.])\r\nz = k(train_x, train_x)\r\nz[0, 0].backward()\r\nprint(k.raw_lengthscale.grad)\r\n\r\n# This fails\r\nz = k(train_x, train_x, diag=True)\r\nz[0].backward()\r\nprint(k.raw_lengthscale.grad)\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-17-ecb6525006fe> in <module>\r\n 1 z = k(train_x, train_x, diag=True)\r\n----> 2 z[0].backward()\r\n 3 print(k.raw_lengthscale.grad)\r\n\r\n/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)\r\n 231 create_graph=create_graph,\r\n 232 inputs=inputs)\r\n--> 233 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\r\n 234 \r\n 235 def register_hook(self, hook):\r\n\r\n/mnt/xarfuse/uid-66331/be3771ae-seed-nspid4026531836-ns-4026531840/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\r\n 144 Variable._execution_engine.run_backward(\r\n 145 tensors, grad_tensors_, retain_graph, create_graph, inputs,\r\n--> 146 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag\r\n 147 \r\n 148 \r\n\r\nRuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [1, 1]], which is output 0 of SoftplusBackward, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).\r\n```\r\n\r\n## Expected Behavior\r\n\r\nSame as what happens without `diag=True`.\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- GPyTorch Version 1.3.0\r\n- PyTorch Version 1.7.1\r\n- Linux\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport torch\n\nfrom ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor\nfrom .rbf_kernel import RBFKernel, postprocess_rbf\n\n\nclass RBFKernelGrad(RBFKernel):\n r\"\"\"\n Computes a covariance matrix of the RBF kernel that models the covariance\n between the values and partial derivatives for inputs :math:`\\mathbf{x_1}`\n and :math:`\\mathbf{x_2}`.\n\n See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.\n\n .. 
note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size/shape of parameter depends on the\n :attr:`ard_num_dims` and :attr:`batch_shape` arguments.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))\n >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)\n \"\"\"\n\n def forward(self, x1, x2, diag=False, **params):\n batch_shape = x1.shape[:-2]\n n_batch_dims = len(batch_shape)\n n1, d = x1.shape[-2:]\n n2 = x2.shape[-2]\n\n K = torch.zeros(*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype)\n\n if not diag:\n # Scale the inputs by the lengthscale (for stability)\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n\n # Form all possible rank-1 products for the gradient and Hessian blocks\n outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)\n outer = outer / self.lengthscale.unsqueeze(-2)\n outer = torch.transpose(outer, -1, -2).contiguous()\n\n # 1) Kernel block\n diff = self.covar_dist(x1_, x2_, square_dist=True, dist_postprocess_func=postprocess_rbf, **params)\n K_11 = diff\n K[..., :n1, :n2] = K_11\n\n # 2) First gradient block\n outer1 = outer.view(*batch_shape, n1, n2 * d)\n K[..., :n1, n2:] = outer1 * K_11.repeat([*([1] * (n_batch_dims + 1)), d])\n\n # 3) Second gradient block\n outer2 = outer.transpose(-1, -3).reshape(*batch_shape, n2, n1 * d)\n outer2 = outer2.transpose(-1, -2)\n K[..., n1:, :n2] = -outer2 * K_11.repeat([*([1] * n_batch_dims), d, 1])\n\n # 4) Hessian block\n outer3 = outer1.repeat([*([1] * n_batch_dims), d, 1]) * outer2.repeat([*([1] * (n_batch_dims + 1)), d])\n kp = KroneckerProductLazyTensor(\n torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1) / self.lengthscale.pow(2),\n torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1),\n )\n chain_rule = kp.evaluate() - outer3\n K[..., n1:, n2:] = chain_rule * K_11.repeat([*([1] * n_batch_dims), d, d])\n\n # Symmetrize for stability\n if n1 == n2 and torch.eq(x1, x2).all():\n K = 
0.5 * (K.transpose(-1, -2) + K)\n\n # Apply a perfect shuffle permutation to match the MutiTask ordering\n pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().reshape((n1 * (d + 1)))\n pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))\n K = K[..., pi1, :][..., :, pi2]\n\n return K\n\n else:\n if not (n1 == n2 and torch.eq(x1, x2).all()):\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n\n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow_(2)\n grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))\n return k_diag[..., pi]\n\n def num_outputs_per_input(self, x1, x2):\n return x1.size(-1) + 1\n", "path": "gpytorch/kernels/rbf_kernel_grad.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport torch\n\nfrom ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor\nfrom .rbf_kernel import RBFKernel, postprocess_rbf\n\n\nclass RBFKernelGrad(RBFKernel):\n r\"\"\"\n Computes a covariance matrix of the RBF kernel that models the covariance\n between the values and partial derivatives for inputs :math:`\\mathbf{x_1}`\n and :math:`\\mathbf{x_2}`.\n\n See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. 
Size/shape of parameter depends on the\n :attr:`ard_num_dims` and :attr:`batch_shape` arguments.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))\n >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60)\n \"\"\"\n\n def forward(self, x1, x2, diag=False, **params):\n batch_shape = x1.shape[:-2]\n n_batch_dims = len(batch_shape)\n n1, d = x1.shape[-2:]\n n2 = x2.shape[-2]\n\n K = torch.zeros(*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype)\n\n if not diag:\n # Scale the inputs by the lengthscale (for stability)\n x1_ = x1.div(self.lengthscale)\n x2_ = x2.div(self.lengthscale)\n\n # Form all possible rank-1 products for the gradient and Hessian blocks\n outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)\n outer = outer / self.lengthscale.unsqueeze(-2)\n outer = torch.transpose(outer, -1, -2).contiguous()\n\n # 1) Kernel block\n diff = self.covar_dist(x1_, x2_, square_dist=True, dist_postprocess_func=postprocess_rbf, **params)\n K_11 = diff\n K[..., :n1, :n2] = K_11\n\n # 2) First gradient block\n outer1 = outer.view(*batch_shape, n1, n2 * d)\n K[..., :n1, n2:] = outer1 * K_11.repeat([*([1] * (n_batch_dims + 1)), d])\n\n # 3) Second gradient block\n outer2 = outer.transpose(-1, -3).reshape(*batch_shape, n2, n1 * d)\n outer2 = outer2.transpose(-1, -2)\n K[..., n1:, :n2] = -outer2 * K_11.repeat([*([1] * n_batch_dims), d, 1])\n\n # 4) Hessian block\n outer3 = outer1.repeat([*([1] * n_batch_dims), d, 1]) * outer2.repeat([*([1] * (n_batch_dims + 1)), d])\n kp = KroneckerProductLazyTensor(\n torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1) / self.lengthscale.pow(2),\n torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1),\n )\n chain_rule = kp.evaluate() - outer3\n K[..., n1:, n2:] = chain_rule * K_11.repeat([*([1] * n_batch_dims), d, d])\n\n # Symmetrize for stability\n if n1 == n2 and torch.eq(x1, x2).all():\n K = 0.5 * (K.transpose(-1, -2) + K)\n\n # Apply a perfect shuffle permutation to match the MutiTask ordering\n pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().reshape((n1 * (d + 1)))\n pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))\n K = K[..., pi1, :][..., :, pi2]\n\n return K\n\n else:\n if not (n1 == n2 and torch.eq(x1, x2).all()):\n raise RuntimeError(\"diag=True only works when x1 == x2\")\n\n kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)\n grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow(2)\n grad_diag = grad_diag.transpose(-1, -2).reshape(*batch_shape, n2 * d)\n k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)\n pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().reshape((n2 * (d + 1)))\n return k_diag[..., pi]\n\n def num_outputs_per_input(self, x1, x2):\n return x1.size(-1) + 1\n", "path": "gpytorch/kernels/rbf_kernel_grad.py"}]} | 2,751 | 248 |
gh_patches_debug_53387 | rasdani/github-patches | git_diff | chainer__chainer-781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support numpy 1.10
numpy 1.10.0 was released on 2015/10/07
https://pypi.python.org/pypi/numpy/1.10.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/creation/ranges.py`
Content:
```
1 import numpy
2
3 import cupy
4 from cupy import core
5
6
7 def arange(start, stop=None, step=1, dtype=None):
8     """Returns an array with evenly spaced values within a given interval.
9
10 Values are generated within the half-open interval [start, stop). The first
11 three arguments are mapped like the ``range`` built-in function, i.e. start
12 and step are optional.
13
14 Args:
15 start: Start of the interval.
16 stop: End of the interval.
17 step: Step width between each pair of consecutive values.
18 dtype: Data type specifier. It is inferred from other arguments by
19 default.
20
21 Returns:
22 cupy.ndarray: The 1-D array of range values.
23
24 .. seealso:: :func:`numpy.arange`
25
26 """
27 if dtype is None:
28 if any(numpy.dtype(type(val)).kind == 'f'
29 for val in (start, stop, step)):
30 dtype = float
31 else:
32 dtype = int
33
34 if stop is None:
35 stop = start
36 start = 0
37 size = int(numpy.ceil((stop - start) / step))
38 if size <= 0:
39 return cupy.empty((0,), dtype=dtype)
40
41 ret = cupy.empty((size,), dtype=dtype)
42 typ = numpy.dtype(dtype).type
43 _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)
44 return ret
45
46
47 def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
48 """Returns an array with evenly-spaced values within a given interval.
49
50 Instead of specifying the step width like :func:`cupy.arange`, this
51 function requires the total number of elements specified.
52
53 Args:
54 start: Start of the interval.
55 stop: End of the interval.
56 num: Number of elements.
57 endpoint (bool): If True, the stop value is included as the last
58 element. Otherwise, the stop value is omitted.
59 retstep (bool): If True, this function returns (array, step).
60 Otherwise, it returns only the array.
61 dtype: Data type specifier. It is inferred from the start and stop
62 arguments by default.
63
64 Returns:
65 cupy.ndarray: The 1-D array of ranged values.
66
67 """
68 if num < 0:
69 raise ValueError('linspace with num<0 is not supported')
70
71 if dtype is None:
72 # In actual implementation, only float is used
73 dtype = float
74
75 ret = cupy.empty((num,), dtype=dtype)
76 if num == 0:
77 step = float('nan')
78 elif num == 1:
79 ret.fill(start)
80 step = float('nan')
81 else:
82 div = (num - 1) if endpoint else num
83 step = float(stop - start) / div
84 stop = float(stop)
85
86 if step == 0.0:
87 # for underflow
88 _linspace_ufunc_underflow(start, stop - start, div, ret)
89 else:
90 _linspace_ufunc(start, step, ret)
91
92 if endpoint:
93 ret[-1] = stop
94
95 if retstep:
96 return ret, step
97 else:
98 return ret
99
100
101 # TODO(okuta): Implement logspace
102
103
104 # TODO(okuta): Implement meshgrid
105
106
107 # mgrid
108 # ogrid
109
110
111 _arange_ufunc = core.create_ufunc(
112 'cupy_arange',
113 ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
114 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
115 'out0 = in0 + i * in1')
116
117
118 _linspace_ufunc = core.create_ufunc(
119 'cupy_linspace',
120 ('dd->d',),
121 'out0 = in0 + i * in1')
122
123 _linspace_ufunc_underflow = core.create_ufunc(
124 'cupy_linspace',
125 ('ddd->d',),
126 'out0 = in0 + i * in1 / in2')
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py
--- a/cupy/creation/ranges.py
+++ b/cupy/creation/ranges.py
@@ -85,9 +85,10 @@
if step == 0.0:
# for underflow
- _linspace_ufunc_underflow(start, stop - start, div, ret)
+ _linspace_ufunc_underflow(start, stop - start, div, ret,
+ casting='unsafe')
else:
- _linspace_ufunc(start, step, ret)
+ _linspace_ufunc(start, step, ret, casting='unsafe')
if endpoint:
ret[-1] = stop
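For context, NumPy 1.10 made the default casting rule for ufunc `out` arguments strictly `'same_kind'`, so silently writing floating-point results into an integer output array became an error; passing `casting='unsafe'` opts back into the old permissive behaviour. The linspace kernels above presumably hit that case when a caller passes `dtype=int` for `ret`. A minimal NumPy-only sketch of the rule the keyword controls (array contents are arbitrary):

```python
import numpy as np

out = np.zeros(3, dtype=int)
try:
    # Default casting ('same_kind') refuses to write float64 results into an int array.
    np.add(out, 0.5, out=out)
except TypeError as exc:
    print(exc)

# casting='unsafe' restores the permissive behaviour: the result is truncated to int.
np.add(out, 0.5, out=out, casting="unsafe")
print(out)  # [0 0 0]
```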
| {"golden_diff": "diff --git a/cupy/creation/ranges.py b/cupy/creation/ranges.py\n--- a/cupy/creation/ranges.py\n+++ b/cupy/creation/ranges.py\n@@ -85,9 +85,10 @@\n \n if step == 0.0:\n # for underflow\n- _linspace_ufunc_underflow(start, stop - start, div, ret)\n+ _linspace_ufunc_underflow(start, stop - start, div, ret,\n+ casting='unsafe')\n else:\n- _linspace_ufunc(start, step, ret)\n+ _linspace_ufunc(start, step, ret, casting='unsafe')\n \n if endpoint:\n ret[-1] = stop\n", "issue": "Support numpy 1.10\nnumpy 1.10.0 is released on 2015/10/07\n\nhttps://pypi.python.org/pypi/numpy/1.10.0\n\n", "before_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret)\n else:\n _linspace_ufunc(start, step, ret)\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}], "after_files": [{"content": "import numpy\n\nimport cupy\nfrom cupy import core\n\n\ndef arange(start, stop=None, step=1, dtype=None):\n \"\"\"Rerurns an array with evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [start, stop). The first\n three arguments are mapped like the ``range`` built-in function, i.e. start\n and step are optional.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n step: Step width between each pair of consecutive values.\n dtype: Data type specifier. It is inferred from other arguments by\n default.\n\n Returns:\n cupy.ndarray: The 1-D array of range values.\n\n .. seealso:: :func:`numpy.arange`\n\n \"\"\"\n if dtype is None:\n if any(numpy.dtype(type(val)).kind == 'f'\n for val in (start, stop, step)):\n dtype = float\n else:\n dtype = int\n\n if stop is None:\n stop = start\n start = 0\n size = int(numpy.ceil((stop - start) / step))\n if size <= 0:\n return cupy.empty((0,), dtype=dtype)\n\n ret = cupy.empty((size,), dtype=dtype)\n typ = numpy.dtype(dtype).type\n _arange_ufunc(typ(start), typ(step), ret, dtype=dtype)\n return ret\n\n\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):\n \"\"\"Returns an array with evenly-spaced values within a given interval.\n\n Instead of specifying the step width like :func:`cupy.arange`, this\n function requires the total number of elements specified.\n\n Args:\n start: Start of the interval.\n stop: End of the interval.\n num: Number of elements.\n endpoint (bool): If True, the stop value is included as the last\n element. Otherwise, the stop value is omitted.\n retstep (bool): If True, this function returns (array, step).\n Otherwise, it returns only the array.\n dtype: Data type specifier. 
It is inferred from the start and stop\n arguments by default.\n\n Returns:\n cupy.ndarray: The 1-D array of ranged values.\n\n \"\"\"\n if num < 0:\n raise ValueError('linspace with num<0 is not supported')\n\n if dtype is None:\n # In actual implementation, only float is used\n dtype = float\n\n ret = cupy.empty((num,), dtype=dtype)\n if num == 0:\n step = float('nan')\n elif num == 1:\n ret.fill(start)\n step = float('nan')\n else:\n div = (num - 1) if endpoint else num\n step = float(stop - start) / div\n stop = float(stop)\n\n if step == 0.0:\n # for underflow\n _linspace_ufunc_underflow(start, stop - start, div, ret,\n casting='unsafe')\n else:\n _linspace_ufunc(start, step, ret, casting='unsafe')\n\n if endpoint:\n ret[-1] = stop\n\n if retstep:\n return ret, step\n else:\n return ret\n\n\n# TODO(okuta): Implement logspace\n\n\n# TODO(okuta): Implement meshgrid\n\n\n# mgrid\n# ogrid\n\n\n_arange_ufunc = core.create_ufunc(\n 'cupy_arange',\n ('bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',\n 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),\n 'out0 = in0 + i * in1')\n\n\n_linspace_ufunc = core.create_ufunc(\n 'cupy_linspace',\n ('dd->d',),\n 'out0 = in0 + i * in1')\n\n_linspace_ufunc_underflow = core.create_ufunc(\n 'cupy_linspace',\n ('ddd->d',),\n 'out0 = in0 + i * in1 / in2')\n", "path": "cupy/creation/ranges.py"}]} | 1,516 | 163 |
gh_patches_debug_1201 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-588 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No way to define options that have no defaults
Currently if you set a value in `cookiecutter.json` to `null` it becomes `None` and is then turned into the _string_ `'None'`.
--- END ISSUE ---
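A minimal sketch of the behaviour described above, reproduced with jinja2 directly (the empty `cookiecutter` context dict is just for illustration):

```python
from jinja2 import Environment

env = Environment()
raw = None  # what a `null` entry in cookiecutter.json deserialises to

# Without a dedicated None check, the value is coerced with str() and rendered
# as a literal template, so the user ends up with the string 'None' as a default.
rendered = env.from_string(str(raw)).render(cookiecutter={})
print(repr(rendered))  # 'None'
```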
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/prompt.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.prompt
6 ---------------------
7
8 Functions for prompting the user for project info.
9 """
10
11 from collections import OrderedDict
12
13 import click
14 from past.builtins import basestring
15
16 from future.utils import iteritems
17 from jinja2.environment import Environment
18
19
20 def read_user_variable(var_name, default_value):
21 """Prompt the user for the given variable and return the entered value
22 or the given default.
23
24 :param str var_name: Variable of the context to query the user
25 :param default_value: Value that will be returned if no input happens
26 """
27 # Please see http://click.pocoo.org/4/api/#click.prompt
28 return click.prompt(var_name, default=default_value)
29
30
31 def read_user_yes_no(question, default_value):
32 """Prompt the user to reply with 'yes' or 'no' (or equivalent values).
33
34 Note:
35 Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'
36
37 :param str question: Question to the user
38 :param default_value: Value that will be returned if no input happens
39 """
40 # Please see http://click.pocoo.org/4/api/#click.prompt
41 return click.prompt(
42 question,
43 default=default_value,
44 type=click.BOOL
45 )
46
47
48 def read_user_choice(var_name, options):
49 """Prompt the user to choose from several options for the given variable.
50
51 The first item will be returned if no input happens.
52
53 :param str var_name: Variable as specified in the context
54 :param list options: Sequence of options that are available to select from
55 :return: Exactly one item of ``options`` that has been chosen by the user
56 """
57 # Please see http://click.pocoo.org/4/api/#click.prompt
58 if not isinstance(options, list):
59 raise TypeError
60
61 if not options:
62 raise ValueError
63
64 choice_map = OrderedDict(
65 (u'{}'.format(i), value) for i, value in enumerate(options, 1)
66 )
67 choices = choice_map.keys()
68 default = u'1'
69
70 choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]
71 prompt = u'\n'.join((
72 u'Select {}:'.format(var_name),
73 u'\n'.join(choice_lines),
74 u'Choose from {}'.format(u', '.join(choices))
75 ))
76
77 user_choice = click.prompt(
78 prompt, type=click.Choice(choices), default=default
79 )
80 return choice_map[user_choice]
81
82
83 def render_variable(env, raw, cookiecutter_dict):
84 if not isinstance(raw, basestring):
85 raw = str(raw)
86 template = env.from_string(raw)
87 rendered_template = template.render(cookiecutter=cookiecutter_dict)
88 return rendered_template
89
90
91 def prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):
92 """Prompt the user which option to choose from the given. Each of the
93 possible choices is rendered beforehand.
94 """
95 rendered_options = [
96 render_variable(env, raw, cookiecutter_dict) for raw in options
97 ]
98
99 if no_input:
100 return rendered_options[0]
101 return read_user_choice(key, rendered_options)
102
103
104 def prompt_for_config(context, no_input=False):
105 """
106 Prompts the user to enter new config, using context as a source for the
107 field names and sample values.
108
109 :param no_input: Prompt the user at command line for manual configuration?
110 """
111 cookiecutter_dict = {}
112 env = Environment()
113
114 for key, raw in iteritems(context[u'cookiecutter']):
115 if key.startswith(u'_'):
116 cookiecutter_dict[key] = raw
117 continue
118
119 if isinstance(raw, list):
120 # We are dealing with a choice variable
121 val = prompt_choice_for_config(
122 cookiecutter_dict, env, key, raw, no_input
123 )
124 else:
125 # We are dealing with a regular variable
126 val = render_variable(env, raw, cookiecutter_dict)
127
128 if not no_input:
129 val = read_user_variable(key, val)
130
131 cookiecutter_dict[key] = val
132 return cookiecutter_dict
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -81,6 +81,8 @@
def render_variable(env, raw, cookiecutter_dict):
+ if raw is None:
+ return None
if not isinstance(raw, basestring):
raw = str(raw)
template = env.from_string(raw)
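With the early return in place, a `null` entry reaches `read_user_variable` as `None`, and `click.prompt` with `default=None` shows no default and keeps re-prompting until the user types something, which appears to be the "no default" behaviour the issue asks for. A tiny illustration (needs an interactive terminal; the variable name is arbitrary):

```python
import click

# default=None: click displays no default value and repeats the prompt
# until non-empty input is given.
value = click.prompt("project_name", default=None)
print(value)
```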
| {"golden_diff": "diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -81,6 +81,8 @@\n \n \n def render_variable(env, raw, cookiecutter_dict):\n+ if raw is None:\n+ return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n", "issue": "No way to define options that have no defaults\nCurrently if you set a value in `cookiecutter.json` to `null` it becomes `None` and is then turned into the _string_ `'None'`.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. 
Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport click\nfrom past.builtins import basestring\n\nfrom future.utils import iteritems\nfrom jinja2.environment import Environment\n\n\ndef read_user_variable(var_name, default_value):\n \"\"\"Prompt the user for the given variable and return the entered value\n or the given default.\n\n :param str var_name: Variable of the context to query the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(var_name, default=default_value)\n\n\ndef read_user_yes_no(question, default_value):\n \"\"\"Prompt the user to reply with 'yes' or 'no' (or equivalent values).\n\n Note:\n Possible choices are 'true', '1', 'yes', 'y' or 'false', '0', 'no', 'n'\n\n :param str question: Question to the user\n :param default_value: Value that will be returned if no input happens\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n return click.prompt(\n question,\n default=default_value,\n type=click.BOOL\n )\n\n\ndef read_user_choice(var_name, options):\n \"\"\"Prompt the user to choose from several options for the given variable.\n\n The first item will be returned if no input happens.\n\n :param str var_name: Variable as specified in the context\n :param list options: Sequence of options that are available to select from\n :return: Exactly one item of ``options`` that has been chosen by the user\n \"\"\"\n # Please see http://click.pocoo.org/4/api/#click.prompt\n if not isinstance(options, list):\n raise TypeError\n\n if not options:\n raise ValueError\n\n choice_map = OrderedDict(\n (u'{}'.format(i), value) for i, value in enumerate(options, 1)\n )\n choices = choice_map.keys()\n default = u'1'\n\n choice_lines = [u'{} - {}'.format(*c) for c in choice_map.items()]\n prompt = u'\\n'.join((\n u'Select {}:'.format(var_name),\n u'\\n'.join(choice_lines),\n u'Choose from {}'.format(u', '.join(choices))\n ))\n\n user_choice = click.prompt(\n prompt, type=click.Choice(choices), default=default\n )\n return choice_map[user_choice]\n\n\ndef render_variable(env, raw, cookiecutter_dict):\n if raw is None:\n return None\n if not isinstance(raw, basestring):\n raw = str(raw)\n template = 
env.from_string(raw)\n rendered_template = template.render(cookiecutter=cookiecutter_dict)\n return rendered_template\n\n\ndef prompt_choice_for_config(cookiecutter_dict, env, key, options, no_input):\n \"\"\"Prompt the user which option to choose from the given. Each of the\n possible choices is rendered beforehand.\n \"\"\"\n rendered_options = [\n render_variable(env, raw, cookiecutter_dict) for raw in options\n ]\n\n if no_input:\n return rendered_options[0]\n return read_user_choice(key, rendered_options)\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context[u'cookiecutter']):\n if key.startswith(u'_'):\n cookiecutter_dict[key] = raw\n continue\n\n if isinstance(raw, list):\n # We are dealing with a choice variable\n val = prompt_choice_for_config(\n cookiecutter_dict, env, key, raw, no_input\n )\n else:\n # We are dealing with a regular variable\n val = render_variable(env, raw, cookiecutter_dict)\n\n if not no_input:\n val = read_user_variable(key, val)\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n", "path": "cookiecutter/prompt.py"}]} | 1,531 | 100 |
gh_patches_debug_32212 | rasdani/github-patches | git_diff | localstack__localstack-9743 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Unable to provision FIFO SNS Topic via CDK
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
When attempting to provision a FIFO SNS topic in LocalStack via CDK using `cdklocal`, e.g.:
```typescript
const topic = new sns.Topic(this, 'FIFOTopic', {
displayName: 'topic.fifo',
fifo: true,
contentBasedDeduplication: true,
});
```
The resulting topic created in LocalStack is not FIFO:

This doesn't appear to be an issue with `cdklocal`, because the template output does appear to have the correct properties:
```json
{
"Resources": {
"FIFOTopic5C947601": {
"Type": "AWS::SNS::Topic",
"Properties": {
"ContentBasedDeduplication": true,
"DisplayName": "topic.fifo",
"FifoTopic": true,
"TopicName": "SNSStack-FIFOTopic-99AA2860.fifo"
},
...
}
```
### Expected Behavior
A FIFO SNS Topic would be provisioned when setting `fifo: true` on the CDK construct.
### How are you starting LocalStack?
With the `localstack` script
### Steps To Reproduce
I created this git repository to provide an example duplicating the issue: https://github.com/tbellerose-godaddy/ls-fifo-sns-cdk-bug
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
localstack start -d
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
cdklocal bootstrap
cdklocal deploy '*'
### Environment
```markdown
- OS: macOS Ventura 13.5.2
- LocalStack: 2.3.2
```
### Anything else?
Creating a FIFO SNS Topic via the `awslocal-cli` works as expected. This is only an issue when creating via CDK.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/sns/resource_providers/aws_sns_topic.py`
Content:
```
1 # LocalStack Resource Provider Scaffolding v2
2 from __future__ import annotations
3
4 from pathlib import Path
5 from typing import Optional, TypedDict
6
7 import localstack.services.cloudformation.provider_utils as util
8 from localstack.services.cloudformation.resource_provider import (
9 OperationStatus,
10 ProgressEvent,
11 ResourceProvider,
12 ResourceRequest,
13 )
14 from localstack.utils.strings import short_uid
15
16
17 class SNSTopicProperties(TypedDict):
18 ContentBasedDeduplication: Optional[bool]
19 DataProtectionPolicy: Optional[dict]
20 DisplayName: Optional[str]
21 FifoTopic: Optional[bool]
22 KmsMasterKeyId: Optional[str]
23 SignatureVersion: Optional[str]
24 Subscription: Optional[list[Subscription]]
25 Tags: Optional[list[Tag]]
26 TopicArn: Optional[str]
27 TopicName: Optional[str]
28 TracingConfig: Optional[str]
29
30
31 class Subscription(TypedDict):
32 Endpoint: Optional[str]
33 Protocol: Optional[str]
34
35
36 class Tag(TypedDict):
37 Key: Optional[str]
38 Value: Optional[str]
39
40
41 REPEATED_INVOCATION = "repeated_invocation"
42
43
44 class SNSTopicProvider(ResourceProvider[SNSTopicProperties]):
45 TYPE = "AWS::SNS::Topic" # Autogenerated. Don't change
46 SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
47
48 def create(
49 self,
50 request: ResourceRequest[SNSTopicProperties],
51 ) -> ProgressEvent[SNSTopicProperties]:
52 """
53 Create a new resource.
54
55 Primary identifier fields:
56 - /properties/TopicArn
57
58
59
60 Create-only properties:
61 - /properties/TopicName
62 - /properties/FifoTopic
63
64 Read-only properties:
65 - /properties/TopicArn
66
67 IAM permissions required:
68 - sns:CreateTopic
69 - sns:TagResource
70 - sns:Subscribe
71 - sns:GetTopicAttributes
72 - sns:PutDataProtectionPolicy
73
74 """
75 model = request.desired_state
76 sns = request.aws_client_factory.sns
77 # TODO: validations and iam checks
78
79 attributes = {k: v for k, v in model.items() if v is not None if k != "TopicName"}
80
81 # following attributes need to be str instead of bool for boto to work
82 if attributes.get("FifoTopic") is not None:
83 attributes["FifoTopic"] = str(attributes.get("FifoTopic"))
84
85 if attributes.get("ContentBasedDeduplication") is not None:
86 attributes["ContentBasedDeduplication"] = str(
87 attributes.get("ContentBasedDeduplication")
88 )
89
90 subscriptions = []
91 if attributes.get("Subscription") is not None:
92 subscriptions = attributes["Subscription"]
93 del attributes["Subscription"]
94
95 tags = []
96 if attributes.get("Tags") is not None:
97 tags = attributes["Tags"]
98 del attributes["Tags"]
99
100 # in case cloudformation didn't provide topic name
101 if model.get("TopicName") is None:
102 model["TopicName"] = f"topic-{short_uid()}"
103
104 create_sns_response = sns.create_topic(Name=model["TopicName"], Attributes=attributes)
105 request.custom_context[REPEATED_INVOCATION] = True
106 model["TopicArn"] = create_sns_response["TopicArn"]
107
108         # now we add subscriptions if they exist
109 for subscription in subscriptions:
110 sns.subscribe(
111 TopicArn=model["TopicArn"],
112 Protocol=subscription["Protocol"],
113 Endpoint=subscription["Endpoint"],
114 )
115 if tags:
116 sns.tag_resource(ResourceArn=model["TopicArn"], Tags=tags)
117
118 return ProgressEvent(
119 status=OperationStatus.SUCCESS,
120 resource_model=model,
121 custom_context=request.custom_context,
122 )
123
124 def read(
125 self,
126 request: ResourceRequest[SNSTopicProperties],
127 ) -> ProgressEvent[SNSTopicProperties]:
128 """
129 Fetch resource information
130
131 IAM permissions required:
132 - sns:GetTopicAttributes
133 - sns:ListTagsForResource
134 - sns:ListSubscriptionsByTopic
135 - sns:GetDataProtectionPolicy
136 """
137 raise NotImplementedError
138
139 def delete(
140 self,
141 request: ResourceRequest[SNSTopicProperties],
142 ) -> ProgressEvent[SNSTopicProperties]:
143 """
144 Delete a resource
145
146 IAM permissions required:
147 - sns:DeleteTopic
148 """
149 model = request.desired_state
150 sns = request.aws_client_factory.sns
151 sns.delete_topic(TopicArn=model["TopicArn"])
152 return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={})
153
154 def update(
155 self,
156 request: ResourceRequest[SNSTopicProperties],
157 ) -> ProgressEvent[SNSTopicProperties]:
158 """
159 Update a resource
160
161 IAM permissions required:
162 - sns:SetTopicAttributes
163 - sns:TagResource
164 - sns:UntagResource
165 - sns:Subscribe
166 - sns:Unsubscribe
167 - sns:GetTopicAttributes
168 - sns:ListTagsForResource
169 - sns:ListSubscriptionsByTopic
170 - sns:GetDataProtectionPolicy
171 - sns:PutDataProtectionPolicy
172 """
173 raise NotImplementedError
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/sns/resource_providers/aws_sns_topic.py b/localstack/services/sns/resource_providers/aws_sns_topic.py
--- a/localstack/services/sns/resource_providers/aws_sns_topic.py
+++ b/localstack/services/sns/resource_providers/aws_sns_topic.py
@@ -11,7 +11,7 @@
ResourceProvider,
ResourceRequest,
)
-from localstack.utils.strings import short_uid
+from localstack.utils.strings import canonicalize_bool_to_str, short_uid
class SNSTopicProperties(TypedDict):
@@ -79,13 +79,11 @@
attributes = {k: v for k, v in model.items() if v is not None if k != "TopicName"}
# following attributes need to be str instead of bool for boto to work
- if attributes.get("FifoTopic") is not None:
- attributes["FifoTopic"] = str(attributes.get("FifoTopic"))
+ if (fifo_topic := attributes.get("FifoTopic")) is not None:
+ attributes["FifoTopic"] = canonicalize_bool_to_str(fifo_topic)
- if attributes.get("ContentBasedDeduplication") is not None:
- attributes["ContentBasedDeduplication"] = str(
- attributes.get("ContentBasedDeduplication")
- )
+ if (content_based_dedup := attributes.get("ContentBasedDeduplication")) is not None:
+ attributes["ContentBasedDeduplication"] = canonicalize_bool_to_str(content_based_dedup)
subscriptions = []
if attributes.get("Subscription") is not None:
@@ -99,7 +97,8 @@
# in case cloudformation didn't provide topic name
if model.get("TopicName") is None:
- model["TopicName"] = f"topic-{short_uid()}"
+ name = f"topic-{short_uid()}" if not fifo_topic else f"topic-{short_uid()}.fifo"
+ model["TopicName"] = name
create_sns_response = sns.create_topic(Name=model["TopicName"], Attributes=attributes)
request.custom_context[REPEATED_INVOCATION] = True
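Two details appear to drive this fix: `str(True)` yields `'True'`, which the SNS attribute check does not recognise as enabling FIFO, and an auto-generated topic name additionally needs the `.fifo` suffix. A sketch of the boolean conversion the patch switches to (this stand-in implementation is an assumption, not the actual `localstack.utils.strings.canonicalize_bool_to_str`):

```python
def canonicalize_bool_to_str(value: bool) -> str:
    # Assumed behaviour: serialise Python booleans the way the SNS API expects.
    return "true" if value else "false"


print(str(True))                       # 'True'  -> FIFO attribute silently ignored
print(canonicalize_bool_to_str(True))  # 'true'  -> accepted by CreateTopic
```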
| {"golden_diff": "diff --git a/localstack/services/sns/resource_providers/aws_sns_topic.py b/localstack/services/sns/resource_providers/aws_sns_topic.py\n--- a/localstack/services/sns/resource_providers/aws_sns_topic.py\n+++ b/localstack/services/sns/resource_providers/aws_sns_topic.py\n@@ -11,7 +11,7 @@\n ResourceProvider,\n ResourceRequest,\n )\n-from localstack.utils.strings import short_uid\n+from localstack.utils.strings import canonicalize_bool_to_str, short_uid\n \n \n class SNSTopicProperties(TypedDict):\n@@ -79,13 +79,11 @@\n attributes = {k: v for k, v in model.items() if v is not None if k != \"TopicName\"}\n \n # following attributes need to be str instead of bool for boto to work\n- if attributes.get(\"FifoTopic\") is not None:\n- attributes[\"FifoTopic\"] = str(attributes.get(\"FifoTopic\"))\n+ if (fifo_topic := attributes.get(\"FifoTopic\")) is not None:\n+ attributes[\"FifoTopic\"] = canonicalize_bool_to_str(fifo_topic)\n \n- if attributes.get(\"ContentBasedDeduplication\") is not None:\n- attributes[\"ContentBasedDeduplication\"] = str(\n- attributes.get(\"ContentBasedDeduplication\")\n- )\n+ if (content_based_dedup := attributes.get(\"ContentBasedDeduplication\")) is not None:\n+ attributes[\"ContentBasedDeduplication\"] = canonicalize_bool_to_str(content_based_dedup)\n \n subscriptions = []\n if attributes.get(\"Subscription\") is not None:\n@@ -99,7 +97,8 @@\n \n # in case cloudformation didn't provide topic name\n if model.get(\"TopicName\") is None:\n- model[\"TopicName\"] = f\"topic-{short_uid()}\"\n+ name = f\"topic-{short_uid()}\" if not fifo_topic else f\"topic-{short_uid()}.fifo\"\n+ model[\"TopicName\"] = name\n \n create_sns_response = sns.create_topic(Name=model[\"TopicName\"], Attributes=attributes)\n request.custom_context[REPEATED_INVOCATION] = True\n", "issue": "bug: Unable to provision FIFO SNS Topic via CDK\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nWhen attempting to provision a FIFO SNS topic in LocalStack via CDK using `cdklocal`, e.g.:\r\n\r\n```typescript\r\nconst topic = new sns.Topic(this, 'FIFOTopic', {\r\n displayName: 'topic.fifo',\r\n fifo: true,\r\n contentBasedDeduplication: true,\r\n});\r\n```\r\n\r\nThe resulting topic created in LocalStack is not FIFO:\r\n\r\n\r\n\r\nThis doesn't appear to be an issue with `cdklocal`, because the template output does appear to have the correct properties:\r\n\r\n```json\r\n{\r\n \"Resources\": {\r\n \"FIFOTopic5C947601\": {\r\n \"Type\": \"AWS::SNS::Topic\",\r\n \"Properties\": {\r\n \"ContentBasedDeduplication\": true,\r\n \"DisplayName\": \"topic.fifo\",\r\n \"FifoTopic\": true,\r\n \"TopicName\": \"SNSStack-FIFOTopic-99AA2860.fifo\"\r\n },\r\n ...\r\n \r\n }\r\n```\r\n\r\n\r\n### Expected Behavior\r\n\r\nA FIFO SNS Topic would be provisioned when setting `fifo: true` on the CDK construct.\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith the `localstack` script\r\n\r\n### Steps To Reproduce\r\n\r\nI created this git repository to provide an example duplicating the issue: https://github.com/tbellerose-godaddy/ls-fifo-sns-cdk-bug\r\n\r\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n localstack start -d\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n \r\n cdklocal bootstrap\r\n cdklocal deploy '*'\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS: macOS Ventura 13.5.2\r\n- LocalStack: 
2.3.2\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nCreating a FIFO SNS Topic via the `awslocal-cli` works as expected. This is only an issue when creating via CDK.\n", "before_files": [{"content": "# LocalStack Resource Provider Scaffolding v2\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Optional, TypedDict\n\nimport localstack.services.cloudformation.provider_utils as util\nfrom localstack.services.cloudformation.resource_provider import (\n OperationStatus,\n ProgressEvent,\n ResourceProvider,\n ResourceRequest,\n)\nfrom localstack.utils.strings import short_uid\n\n\nclass SNSTopicProperties(TypedDict):\n ContentBasedDeduplication: Optional[bool]\n DataProtectionPolicy: Optional[dict]\n DisplayName: Optional[str]\n FifoTopic: Optional[bool]\n KmsMasterKeyId: Optional[str]\n SignatureVersion: Optional[str]\n Subscription: Optional[list[Subscription]]\n Tags: Optional[list[Tag]]\n TopicArn: Optional[str]\n TopicName: Optional[str]\n TracingConfig: Optional[str]\n\n\nclass Subscription(TypedDict):\n Endpoint: Optional[str]\n Protocol: Optional[str]\n\n\nclass Tag(TypedDict):\n Key: Optional[str]\n Value: Optional[str]\n\n\nREPEATED_INVOCATION = \"repeated_invocation\"\n\n\nclass SNSTopicProvider(ResourceProvider[SNSTopicProperties]):\n TYPE = \"AWS::SNS::Topic\" # Autogenerated. Don't change\n SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change\n\n def create(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Create a new resource.\n\n Primary identifier fields:\n - /properties/TopicArn\n\n\n\n Create-only properties:\n - /properties/TopicName\n - /properties/FifoTopic\n\n Read-only properties:\n - /properties/TopicArn\n\n IAM permissions required:\n - sns:CreateTopic\n - sns:TagResource\n - sns:Subscribe\n - sns:GetTopicAttributes\n - sns:PutDataProtectionPolicy\n\n \"\"\"\n model = request.desired_state\n sns = request.aws_client_factory.sns\n # TODO: validations and iam checks\n\n attributes = {k: v for k, v in model.items() if v is not None if k != \"TopicName\"}\n\n # following attributes need to be str instead of bool for boto to work\n if attributes.get(\"FifoTopic\") is not None:\n attributes[\"FifoTopic\"] = str(attributes.get(\"FifoTopic\"))\n\n if attributes.get(\"ContentBasedDeduplication\") is not None:\n attributes[\"ContentBasedDeduplication\"] = str(\n attributes.get(\"ContentBasedDeduplication\")\n )\n\n subscriptions = []\n if attributes.get(\"Subscription\") is not None:\n subscriptions = attributes[\"Subscription\"]\n del attributes[\"Subscription\"]\n\n tags = []\n if attributes.get(\"Tags\") is not None:\n tags = attributes[\"Tags\"]\n del attributes[\"Tags\"]\n\n # in case cloudformation didn't provide topic name\n if model.get(\"TopicName\") is None:\n model[\"TopicName\"] = f\"topic-{short_uid()}\"\n\n create_sns_response = sns.create_topic(Name=model[\"TopicName\"], Attributes=attributes)\n request.custom_context[REPEATED_INVOCATION] = True\n model[\"TopicArn\"] = create_sns_response[\"TopicArn\"]\n\n # now we add subscriptions if they exists\n for subscription in subscriptions:\n sns.subscribe(\n TopicArn=model[\"TopicArn\"],\n Protocol=subscription[\"Protocol\"],\n Endpoint=subscription[\"Endpoint\"],\n )\n if tags:\n sns.tag_resource(ResourceArn=model[\"TopicArn\"], Tags=tags)\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def read(\n self,\n request: 
ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Fetch resource information\n\n IAM permissions required:\n - sns:GetTopicAttributes\n - sns:ListTagsForResource\n - sns:ListSubscriptionsByTopic\n - sns:GetDataProtectionPolicy\n \"\"\"\n raise NotImplementedError\n\n def delete(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Delete a resource\n\n IAM permissions required:\n - sns:DeleteTopic\n \"\"\"\n model = request.desired_state\n sns = request.aws_client_factory.sns\n sns.delete_topic(TopicArn=model[\"TopicArn\"])\n return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={})\n\n def update(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Update a resource\n\n IAM permissions required:\n - sns:SetTopicAttributes\n - sns:TagResource\n - sns:UntagResource\n - sns:Subscribe\n - sns:Unsubscribe\n - sns:GetTopicAttributes\n - sns:ListTagsForResource\n - sns:ListSubscriptionsByTopic\n - sns:GetDataProtectionPolicy\n - sns:PutDataProtectionPolicy\n \"\"\"\n raise NotImplementedError\n", "path": "localstack/services/sns/resource_providers/aws_sns_topic.py"}], "after_files": [{"content": "# LocalStack Resource Provider Scaffolding v2\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Optional, TypedDict\n\nimport localstack.services.cloudformation.provider_utils as util\nfrom localstack.services.cloudformation.resource_provider import (\n OperationStatus,\n ProgressEvent,\n ResourceProvider,\n ResourceRequest,\n)\nfrom localstack.utils.strings import canonicalize_bool_to_str, short_uid\n\n\nclass SNSTopicProperties(TypedDict):\n ContentBasedDeduplication: Optional[bool]\n DataProtectionPolicy: Optional[dict]\n DisplayName: Optional[str]\n FifoTopic: Optional[bool]\n KmsMasterKeyId: Optional[str]\n SignatureVersion: Optional[str]\n Subscription: Optional[list[Subscription]]\n Tags: Optional[list[Tag]]\n TopicArn: Optional[str]\n TopicName: Optional[str]\n TracingConfig: Optional[str]\n\n\nclass Subscription(TypedDict):\n Endpoint: Optional[str]\n Protocol: Optional[str]\n\n\nclass Tag(TypedDict):\n Key: Optional[str]\n Value: Optional[str]\n\n\nREPEATED_INVOCATION = \"repeated_invocation\"\n\n\nclass SNSTopicProvider(ResourceProvider[SNSTopicProperties]):\n TYPE = \"AWS::SNS::Topic\" # Autogenerated. Don't change\n SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. 
Don't change\n\n def create(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Create a new resource.\n\n Primary identifier fields:\n - /properties/TopicArn\n\n\n\n Create-only properties:\n - /properties/TopicName\n - /properties/FifoTopic\n\n Read-only properties:\n - /properties/TopicArn\n\n IAM permissions required:\n - sns:CreateTopic\n - sns:TagResource\n - sns:Subscribe\n - sns:GetTopicAttributes\n - sns:PutDataProtectionPolicy\n\n \"\"\"\n model = request.desired_state\n sns = request.aws_client_factory.sns\n # TODO: validations and iam checks\n\n attributes = {k: v for k, v in model.items() if v is not None if k != \"TopicName\"}\n\n # following attributes need to be str instead of bool for boto to work\n if (fifo_topic := attributes.get(\"FifoTopic\")) is not None:\n attributes[\"FifoTopic\"] = canonicalize_bool_to_str(fifo_topic)\n\n if (content_based_dedup := attributes.get(\"ContentBasedDeduplication\")) is not None:\n attributes[\"ContentBasedDeduplication\"] = canonicalize_bool_to_str(content_based_dedup)\n\n subscriptions = []\n if attributes.get(\"Subscription\") is not None:\n subscriptions = attributes[\"Subscription\"]\n del attributes[\"Subscription\"]\n\n tags = []\n if attributes.get(\"Tags\") is not None:\n tags = attributes[\"Tags\"]\n del attributes[\"Tags\"]\n\n # in case cloudformation didn't provide topic name\n if model.get(\"TopicName\") is None:\n name = f\"topic-{short_uid()}\" if not fifo_topic else f\"topic-{short_uid()}.fifo\"\n model[\"TopicName\"] = name\n\n create_sns_response = sns.create_topic(Name=model[\"TopicName\"], Attributes=attributes)\n request.custom_context[REPEATED_INVOCATION] = True\n model[\"TopicArn\"] = create_sns_response[\"TopicArn\"]\n\n # now we add subscriptions if they exists\n for subscription in subscriptions:\n sns.subscribe(\n TopicArn=model[\"TopicArn\"],\n Protocol=subscription[\"Protocol\"],\n Endpoint=subscription[\"Endpoint\"],\n )\n if tags:\n sns.tag_resource(ResourceArn=model[\"TopicArn\"], Tags=tags)\n\n return ProgressEvent(\n status=OperationStatus.SUCCESS,\n resource_model=model,\n custom_context=request.custom_context,\n )\n\n def read(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Fetch resource information\n\n IAM permissions required:\n - sns:GetTopicAttributes\n - sns:ListTagsForResource\n - sns:ListSubscriptionsByTopic\n - sns:GetDataProtectionPolicy\n \"\"\"\n raise NotImplementedError\n\n def delete(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Delete a resource\n\n IAM permissions required:\n - sns:DeleteTopic\n \"\"\"\n model = request.desired_state\n sns = request.aws_client_factory.sns\n sns.delete_topic(TopicArn=model[\"TopicArn\"])\n return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={})\n\n def update(\n self,\n request: ResourceRequest[SNSTopicProperties],\n ) -> ProgressEvent[SNSTopicProperties]:\n \"\"\"\n Update a resource\n\n IAM permissions required:\n - sns:SetTopicAttributes\n - sns:TagResource\n - sns:UntagResource\n - sns:Subscribe\n - sns:Unsubscribe\n - sns:GetTopicAttributes\n - sns:ListTagsForResource\n - sns:ListSubscriptionsByTopic\n - sns:GetDataProtectionPolicy\n - sns:PutDataProtectionPolicy\n \"\"\"\n raise NotImplementedError\n", "path": "localstack/services/sns/resource_providers/aws_sns_topic.py"}]} | 2,337 | 478 |
gh_patches_debug_7835 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-3034 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Audio] Lavalink isn't properly restarted after unexpected shutdown
# Other bugs
When you kill the Java process that runs Lavalink, it won't get restarted by the ServerManager even though it says it's restarting it.
Bad logic is here: <https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/audio/manager.py#L59>
--- END ISSUE ---
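A condensed, runnable sketch of the guard the linked line points at (the full method appears in the file listing below; the subprocess command here is a stand-in, not Lavalink): once `_proc` is set and the old process has exited, a second call to `start()` can only raise, so the monitor's attempted restart never actually relaunches the server.

```python
import asyncio
import sys


class Manager:
    def __init__(self):
        self._proc = None

    async def start(self):
        # Mirrors the guard at manager.py#L59: any second call raises.
        if self._proc is not None:
            if self._proc.returncode is None:
                raise RuntimeError("Internal Lavalink server is already running")
            raise RuntimeError("Server manager has already been used - create another one")
        self._proc = await asyncio.create_subprocess_exec(
            sys.executable, "-c", "print('lavalink stand-in')"
        )
        await self._proc.wait()


async def main():
    m = Manager()
    await m.start()   # first start works; the stand-in process exits immediately
    await m.start()   # the "restart" raises instead of spawning a new process


asyncio.run(main())
```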
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/audio/manager.py`
Content:
```
1 import itertools
2 import pathlib
3 import platform
4 import shutil
5 import asyncio
6 import asyncio.subprocess # disables for # https://github.com/PyCQA/pylint/issues/1469
7 import logging
8 import re
9 import sys
10 import tempfile
11 from typing import Optional, Tuple, ClassVar, List
12
13 import aiohttp
14 from tqdm import tqdm
15
16 from redbot.core import data_manager
17 from .errors import LavalinkDownloadFailed
18
19 JAR_VERSION = "3.2.1"
20 JAR_BUILD = 823
21 LAVALINK_DOWNLOAD_URL = (
22 f"https://github.com/Cog-Creators/Lavalink-Jars/releases/download/{JAR_VERSION}_{JAR_BUILD}/"
23 f"Lavalink.jar"
24 )
25 LAVALINK_DOWNLOAD_DIR = data_manager.cog_data_path(raw_name="Audio")
26 LAVALINK_JAR_FILE = LAVALINK_DOWNLOAD_DIR / "Lavalink.jar"
27
28 BUNDLED_APP_YML = pathlib.Path(__file__).parent / "data" / "application.yml"
29 LAVALINK_APP_YML = LAVALINK_DOWNLOAD_DIR / "application.yml"
30
31 READY_LINE_RE = re.compile(rb"Started Launcher in \S+ seconds")
32 BUILD_LINE_RE = re.compile(rb"Build:\s+(?P<build>\d+)")
33
34 log = logging.getLogger("red.audio.manager")
35
36
37 class ServerManager:
38
39 _java_available: ClassVar[Optional[bool]] = None
40 _java_version: ClassVar[Optional[Tuple[int, int]]] = None
41 _up_to_date: ClassVar[Optional[bool]] = None
42
43 _blacklisted_archs = []
44
45 def __init__(self) -> None:
46 self.ready = asyncio.Event()
47
48 self._proc: Optional[asyncio.subprocess.Process] = None # pylint:disable=no-member
49 self._monitor_task: Optional[asyncio.Task] = None
50 self._shutdown: bool = False
51
52 async def start(self) -> None:
53 arch_name = platform.machine()
54 if arch_name in self._blacklisted_archs:
55 raise asyncio.CancelledError(
56 "You are attempting to run Lavalink audio on an unsupported machine architecture."
57 )
58
59 if self._proc is not None:
60 if self._proc.returncode is None:
61 raise RuntimeError("Internal Lavalink server is already running")
62 else:
63 raise RuntimeError("Server manager has already been used - create another one")
64
65 await self.maybe_download_jar()
66
67 # Copy the application.yml across.
68 # For people to customise their Lavalink server configuration they need to run it
69 # externally
70 shutil.copyfile(BUNDLED_APP_YML, LAVALINK_APP_YML)
71
72 args = await self._get_jar_args()
73 self._proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member
74 *args,
75 cwd=str(LAVALINK_DOWNLOAD_DIR),
76 stdout=asyncio.subprocess.PIPE,
77 stderr=asyncio.subprocess.STDOUT,
78 )
79
80 log.info("Internal Lavalink server started. PID: %s", self._proc.pid)
81
82 try:
83 await asyncio.wait_for(self._wait_for_launcher(), timeout=120)
84 except asyncio.TimeoutError:
85 log.warning("Timeout occurred whilst waiting for internal Lavalink server to be ready")
86
87 self._monitor_task = asyncio.create_task(self._monitor())
88
89 @classmethod
90 async def _get_jar_args(cls) -> List[str]:
91 java_available, java_version = await cls._has_java()
92 if not java_available:
93 raise RuntimeError("You must install Java 1.8+ for Lavalink to run.")
94
95 if java_version == (1, 8):
96 extra_flags = ["-Dsun.zip.disableMemoryMapping=true"]
97 elif java_version >= (11, 0):
98 extra_flags = ["-Djdk.tls.client.protocols=TLSv1.2"]
99 else:
100 extra_flags = []
101
102 return ["java", *extra_flags, "-jar", str(LAVALINK_JAR_FILE)]
103
104 @classmethod
105 async def _has_java(cls) -> Tuple[bool, Optional[Tuple[int, int]]]:
106 if cls._java_available is not None:
107 # Return cached value if we've checked this before
108 return cls._java_available, cls._java_version
109 java_available = shutil.which("java") is not None
110 if not java_available:
111 cls.java_available = False
112 cls.java_version = None
113 else:
114 cls._java_version = version = await cls._get_java_version()
115 cls._java_available = (2, 0) > version >= (1, 8) or version >= (8, 0)
116 return cls._java_available, cls._java_version
117
118 @staticmethod
119 async def _get_java_version() -> Tuple[int, int]:
120 """
121 This assumes we've already checked that java exists.
122 """
123 _proc: asyncio.subprocess.Process = await asyncio.create_subprocess_exec( # pylint:disable=no-member
124 "java", "-version", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
125 )
126 # java -version outputs to stderr
127 _, err = await _proc.communicate()
128
129 version_info: str = err.decode("utf-8")
130 # We expect the output to look something like:
131 # $ java -version
132 # ...
133 # ... version "MAJOR.MINOR.PATCH[_BUILD]" ...
134 # ...
135 # We only care about the major and minor parts though.
136 version_line_re = re.compile(
137 r'version "(?P<major>\d+).(?P<minor>\d+).\d+(?:_\d+)?(?:-[A-Za-z0-9]+)?"'
138 )
139 short_version_re = re.compile(r'version "(?P<major>\d+)"')
140
141 lines = version_info.splitlines()
142 for line in lines:
143 match = version_line_re.search(line)
144 short_match = short_version_re.search(line)
145 if match:
146 return int(match["major"]), int(match["minor"])
147 elif short_match:
148 return int(short_match["major"]), 0
149
150 raise RuntimeError(
151 "The output of `java -version` was unexpected. Please report this issue on Red's "
152 "issue tracker."
153 )
154
155 async def _wait_for_launcher(self) -> None:
156 log.debug("Waiting for Lavalink server to be ready")
157 for i in itertools.cycle(range(50)):
158 line = await self._proc.stdout.readline()
159 if READY_LINE_RE.search(line):
160 self.ready.set()
161 break
162 if self._proc.returncode is not None:
163 log.critical("Internal lavalink server exited early")
164 if i == 49:
165 # Sleep after 50 lines to prevent busylooping
166 await asyncio.sleep(0.1)
167
168 async def _monitor(self) -> None:
169 while self._proc.returncode is None:
170 await asyncio.sleep(0.5)
171
172 # This task hasn't been cancelled - Lavalink was shut down by something else
173 log.info("Internal Lavalink jar shutdown unexpectedly")
174 if not self._has_java_error():
175 log.info("Restarting internal Lavalink server")
176 await self.start()
177 else:
178 log.critical(
179 "Your Java is borked. Please find the hs_err_pid%d.log file"
180 " in the Audio data folder and report this issue.",
181 self._proc.pid,
182 )
183
184 def _has_java_error(self) -> bool:
185 poss_error_file = LAVALINK_DOWNLOAD_DIR / "hs_err_pid{}.log".format(self._proc.pid)
186 return poss_error_file.exists()
187
188 async def shutdown(self) -> None:
189 if self._shutdown is True or self._proc is None:
190 # For convenience, calling this method more than once or calling it before starting it
191 # does nothing.
192 return
193 log.info("Shutting down internal Lavalink server")
194 if self._monitor_task is not None:
195 self._monitor_task.cancel()
196 self._proc.terminate()
197 await self._proc.wait()
198 self._shutdown = True
199
200 @staticmethod
201 async def _download_jar() -> None:
202 log.info("Downloading Lavalink.jar...")
203 async with aiohttp.ClientSession() as session:
204 async with session.get(LAVALINK_DOWNLOAD_URL) as response:
205 if response.status == 404:
206 # A 404 means our LAVALINK_DOWNLOAD_URL is invalid, so likely the jar version
207 # hasn't been published yet
208 raise LavalinkDownloadFailed(
209 f"Lavalink jar version {JAR_VERSION}_{JAR_BUILD} hasn't been published "
210 f"yet",
211 response=response,
212 should_retry=False,
213 )
214 elif 400 <= response.status < 600:
215 # Other bad responses should be raised but we should retry just incase
216 raise LavalinkDownloadFailed(response=response, should_retry=True)
217 fd, path = tempfile.mkstemp()
218 file = open(fd, "wb")
219 nbytes = 0
220 with tqdm(
221 desc="Lavalink.jar",
222 total=response.content_length,
223 file=sys.stdout,
224 unit="B",
225 unit_scale=True,
226 miniters=1,
227 dynamic_ncols=True,
228 leave=False,
229 ) as progress_bar:
230 try:
231 chunk = await response.content.read(1024)
232 while chunk:
233 chunk_size = file.write(chunk)
234 nbytes += chunk_size
235 progress_bar.update(chunk_size)
236 chunk = await response.content.read(1024)
237 file.flush()
238 finally:
239 file.close()
240
241 shutil.move(path, str(LAVALINK_JAR_FILE), copy_function=shutil.copyfile)
242
243 log.info("Successfully downloaded Lavalink.jar (%s bytes written)", format(nbytes, ","))
244
245 @classmethod
246 async def _is_up_to_date(cls):
247 if cls._up_to_date is True:
248 # Return cached value if we've checked this before
249 return True
250 args = await cls._get_jar_args()
251 args.append("--version")
252 _proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member
253 *args,
254 cwd=str(LAVALINK_DOWNLOAD_DIR),
255 stdout=asyncio.subprocess.PIPE,
256 stderr=asyncio.subprocess.STDOUT,
257 )
258 stdout = (await _proc.communicate())[0]
259 match = BUILD_LINE_RE.search(stdout)
260 if not match:
261 # Output is unexpected, suspect corrupted jarfile
262 return False
263 build = int(match["build"])
264 cls._up_to_date = build >= JAR_BUILD
265 return cls._up_to_date
266
267 @classmethod
268 async def maybe_download_jar(cls):
269 if not (LAVALINK_JAR_FILE.exists() and await cls._is_up_to_date()):
270 await cls._download_jar()
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redbot/cogs/audio/manager.py b/redbot/cogs/audio/manager.py
--- a/redbot/cogs/audio/manager.py
+++ b/redbot/cogs/audio/manager.py
@@ -59,7 +59,7 @@
if self._proc is not None:
if self._proc.returncode is None:
raise RuntimeError("Internal Lavalink server is already running")
- else:
+ elif self._shutdown:
raise RuntimeError("Server manager has already been used - create another one")
await self.maybe_download_jar()
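
For clarity, here is a minimal sketch (simplified from the file above, not the verbatim upstream class) of why the one-line guard change lets `_monitor()` restart the server after an unexpected exit while a deliberate `shutdown()` still blocks reuse:

```python
# Sketch of the corrected guard in ServerManager.start(); names mirror the file
# above, but the class is stripped down for illustration.
class SketchServerManager:
    def __init__(self):
        self._proc = None          # set once the subprocess is spawned
        self._shutdown = False     # set only by an explicit shutdown()

    async def start(self):
        if self._proc is not None:
            if self._proc.returncode is None:
                # Subprocess still alive: refuse to double-start.
                raise RuntimeError("Internal Lavalink server is already running")
            elif self._shutdown:
                # Subprocess ended because shutdown() ran: manager is spent.
                raise RuntimeError("Server manager has already been used - create another one")
            # Otherwise the jar died unexpectedly (returncode is set while
            # _shutdown is False), so fall through and let the restart
            # triggered by _monitor() proceed.
        ...  # download the jar, spawn the subprocess, start monitoring
```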
| {"golden_diff": "diff --git a/redbot/cogs/audio/manager.py b/redbot/cogs/audio/manager.py\n--- a/redbot/cogs/audio/manager.py\n+++ b/redbot/cogs/audio/manager.py\n@@ -59,7 +59,7 @@\n if self._proc is not None:\n if self._proc.returncode is None:\n raise RuntimeError(\"Internal Lavalink server is already running\")\n- else:\n+ elif self._shutdown:\n raise RuntimeError(\"Server manager has already been used - create another one\")\n \n await self.maybe_download_jar()\n", "issue": "[Audio] Lavalink isn't properly restarted after unexpected shutdown\n# Other bugs\r\n\r\nWhen you kill java process that runs Lavalink it won't get restarted by ServerManager even though it says it's restarting it.\r\n\r\nBad logic is here: <https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/audio/manager.py#L59>\n", "before_files": [{"content": "import itertools\nimport pathlib\nimport platform\nimport shutil\nimport asyncio\nimport asyncio.subprocess # disables for # https://github.com/PyCQA/pylint/issues/1469\nimport logging\nimport re\nimport sys\nimport tempfile\nfrom typing import Optional, Tuple, ClassVar, List\n\nimport aiohttp\nfrom tqdm import tqdm\n\nfrom redbot.core import data_manager\nfrom .errors import LavalinkDownloadFailed\n\nJAR_VERSION = \"3.2.1\"\nJAR_BUILD = 823\nLAVALINK_DOWNLOAD_URL = (\n f\"https://github.com/Cog-Creators/Lavalink-Jars/releases/download/{JAR_VERSION}_{JAR_BUILD}/\"\n f\"Lavalink.jar\"\n)\nLAVALINK_DOWNLOAD_DIR = data_manager.cog_data_path(raw_name=\"Audio\")\nLAVALINK_JAR_FILE = LAVALINK_DOWNLOAD_DIR / \"Lavalink.jar\"\n\nBUNDLED_APP_YML = pathlib.Path(__file__).parent / \"data\" / \"application.yml\"\nLAVALINK_APP_YML = LAVALINK_DOWNLOAD_DIR / \"application.yml\"\n\nREADY_LINE_RE = re.compile(rb\"Started Launcher in \\S+ seconds\")\nBUILD_LINE_RE = re.compile(rb\"Build:\\s+(?P<build>\\d+)\")\n\nlog = logging.getLogger(\"red.audio.manager\")\n\n\nclass ServerManager:\n\n _java_available: ClassVar[Optional[bool]] = None\n _java_version: ClassVar[Optional[Tuple[int, int]]] = None\n _up_to_date: ClassVar[Optional[bool]] = None\n\n _blacklisted_archs = []\n\n def __init__(self) -> None:\n self.ready = asyncio.Event()\n\n self._proc: Optional[asyncio.subprocess.Process] = None # pylint:disable=no-member\n self._monitor_task: Optional[asyncio.Task] = None\n self._shutdown: bool = False\n\n async def start(self) -> None:\n arch_name = platform.machine()\n if arch_name in self._blacklisted_archs:\n raise asyncio.CancelledError(\n \"You are attempting to run Lavalink audio on an unsupported machine architecture.\"\n )\n\n if self._proc is not None:\n if self._proc.returncode is None:\n raise RuntimeError(\"Internal Lavalink server is already running\")\n else:\n raise RuntimeError(\"Server manager has already been used - create another one\")\n\n await self.maybe_download_jar()\n\n # Copy the application.yml across.\n # For people to customise their Lavalink server configuration they need to run it\n # externally\n shutil.copyfile(BUNDLED_APP_YML, LAVALINK_APP_YML)\n\n args = await self._get_jar_args()\n self._proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member\n *args,\n cwd=str(LAVALINK_DOWNLOAD_DIR),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT,\n )\n\n log.info(\"Internal Lavalink server started. 
PID: %s\", self._proc.pid)\n\n try:\n await asyncio.wait_for(self._wait_for_launcher(), timeout=120)\n except asyncio.TimeoutError:\n log.warning(\"Timeout occurred whilst waiting for internal Lavalink server to be ready\")\n\n self._monitor_task = asyncio.create_task(self._monitor())\n\n @classmethod\n async def _get_jar_args(cls) -> List[str]:\n java_available, java_version = await cls._has_java()\n if not java_available:\n raise RuntimeError(\"You must install Java 1.8+ for Lavalink to run.\")\n\n if java_version == (1, 8):\n extra_flags = [\"-Dsun.zip.disableMemoryMapping=true\"]\n elif java_version >= (11, 0):\n extra_flags = [\"-Djdk.tls.client.protocols=TLSv1.2\"]\n else:\n extra_flags = []\n\n return [\"java\", *extra_flags, \"-jar\", str(LAVALINK_JAR_FILE)]\n\n @classmethod\n async def _has_java(cls) -> Tuple[bool, Optional[Tuple[int, int]]]:\n if cls._java_available is not None:\n # Return cached value if we've checked this before\n return cls._java_available, cls._java_version\n java_available = shutil.which(\"java\") is not None\n if not java_available:\n cls.java_available = False\n cls.java_version = None\n else:\n cls._java_version = version = await cls._get_java_version()\n cls._java_available = (2, 0) > version >= (1, 8) or version >= (8, 0)\n return cls._java_available, cls._java_version\n\n @staticmethod\n async def _get_java_version() -> Tuple[int, int]:\n \"\"\"\n This assumes we've already checked that java exists.\n \"\"\"\n _proc: asyncio.subprocess.Process = await asyncio.create_subprocess_exec( # pylint:disable=no-member\n \"java\", \"-version\", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n # java -version outputs to stderr\n _, err = await _proc.communicate()\n\n version_info: str = err.decode(\"utf-8\")\n # We expect the output to look something like:\n # $ java -version\n # ...\n # ... version \"MAJOR.MINOR.PATCH[_BUILD]\" ...\n # ...\n # We only care about the major and minor parts though.\n version_line_re = re.compile(\n r'version \"(?P<major>\\d+).(?P<minor>\\d+).\\d+(?:_\\d+)?(?:-[A-Za-z0-9]+)?\"'\n )\n short_version_re = re.compile(r'version \"(?P<major>\\d+)\"')\n\n lines = version_info.splitlines()\n for line in lines:\n match = version_line_re.search(line)\n short_match = short_version_re.search(line)\n if match:\n return int(match[\"major\"]), int(match[\"minor\"])\n elif short_match:\n return int(short_match[\"major\"]), 0\n\n raise RuntimeError(\n \"The output of `java -version` was unexpected. Please report this issue on Red's \"\n \"issue tracker.\"\n )\n\n async def _wait_for_launcher(self) -> None:\n log.debug(\"Waiting for Lavalink server to be ready\")\n for i in itertools.cycle(range(50)):\n line = await self._proc.stdout.readline()\n if READY_LINE_RE.search(line):\n self.ready.set()\n break\n if self._proc.returncode is not None:\n log.critical(\"Internal lavalink server exited early\")\n if i == 49:\n # Sleep after 50 lines to prevent busylooping\n await asyncio.sleep(0.1)\n\n async def _monitor(self) -> None:\n while self._proc.returncode is None:\n await asyncio.sleep(0.5)\n\n # This task hasn't been cancelled - Lavalink was shut down by something else\n log.info(\"Internal Lavalink jar shutdown unexpectedly\")\n if not self._has_java_error():\n log.info(\"Restarting internal Lavalink server\")\n await self.start()\n else:\n log.critical(\n \"Your Java is borked. 
Please find the hs_err_pid%d.log file\"\n \" in the Audio data folder and report this issue.\",\n self._proc.pid,\n )\n\n def _has_java_error(self) -> bool:\n poss_error_file = LAVALINK_DOWNLOAD_DIR / \"hs_err_pid{}.log\".format(self._proc.pid)\n return poss_error_file.exists()\n\n async def shutdown(self) -> None:\n if self._shutdown is True or self._proc is None:\n # For convenience, calling this method more than once or calling it before starting it\n # does nothing.\n return\n log.info(\"Shutting down internal Lavalink server\")\n if self._monitor_task is not None:\n self._monitor_task.cancel()\n self._proc.terminate()\n await self._proc.wait()\n self._shutdown = True\n\n @staticmethod\n async def _download_jar() -> None:\n log.info(\"Downloading Lavalink.jar...\")\n async with aiohttp.ClientSession() as session:\n async with session.get(LAVALINK_DOWNLOAD_URL) as response:\n if response.status == 404:\n # A 404 means our LAVALINK_DOWNLOAD_URL is invalid, so likely the jar version\n # hasn't been published yet\n raise LavalinkDownloadFailed(\n f\"Lavalink jar version {JAR_VERSION}_{JAR_BUILD} hasn't been published \"\n f\"yet\",\n response=response,\n should_retry=False,\n )\n elif 400 <= response.status < 600:\n # Other bad responses should be raised but we should retry just incase\n raise LavalinkDownloadFailed(response=response, should_retry=True)\n fd, path = tempfile.mkstemp()\n file = open(fd, \"wb\")\n nbytes = 0\n with tqdm(\n desc=\"Lavalink.jar\",\n total=response.content_length,\n file=sys.stdout,\n unit=\"B\",\n unit_scale=True,\n miniters=1,\n dynamic_ncols=True,\n leave=False,\n ) as progress_bar:\n try:\n chunk = await response.content.read(1024)\n while chunk:\n chunk_size = file.write(chunk)\n nbytes += chunk_size\n progress_bar.update(chunk_size)\n chunk = await response.content.read(1024)\n file.flush()\n finally:\n file.close()\n\n shutil.move(path, str(LAVALINK_JAR_FILE), copy_function=shutil.copyfile)\n\n log.info(\"Successfully downloaded Lavalink.jar (%s bytes written)\", format(nbytes, \",\"))\n\n @classmethod\n async def _is_up_to_date(cls):\n if cls._up_to_date is True:\n # Return cached value if we've checked this before\n return True\n args = await cls._get_jar_args()\n args.append(\"--version\")\n _proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member\n *args,\n cwd=str(LAVALINK_DOWNLOAD_DIR),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT,\n )\n stdout = (await _proc.communicate())[0]\n match = BUILD_LINE_RE.search(stdout)\n if not match:\n # Output is unexpected, suspect corrupted jarfile\n return False\n build = int(match[\"build\"])\n cls._up_to_date = build >= JAR_BUILD\n return cls._up_to_date\n\n @classmethod\n async def maybe_download_jar(cls):\n if not (LAVALINK_JAR_FILE.exists() and await cls._is_up_to_date()):\n await cls._download_jar()\n", "path": "redbot/cogs/audio/manager.py"}], "after_files": [{"content": "import itertools\nimport pathlib\nimport platform\nimport shutil\nimport asyncio\nimport asyncio.subprocess # disables for # https://github.com/PyCQA/pylint/issues/1469\nimport logging\nimport re\nimport sys\nimport tempfile\nfrom typing import Optional, Tuple, ClassVar, List\n\nimport aiohttp\nfrom tqdm import tqdm\n\nfrom redbot.core import data_manager\nfrom .errors import LavalinkDownloadFailed\n\nJAR_VERSION = \"3.2.1\"\nJAR_BUILD = 823\nLAVALINK_DOWNLOAD_URL = (\n f\"https://github.com/Cog-Creators/Lavalink-Jars/releases/download/{JAR_VERSION}_{JAR_BUILD}/\"\n 
f\"Lavalink.jar\"\n)\nLAVALINK_DOWNLOAD_DIR = data_manager.cog_data_path(raw_name=\"Audio\")\nLAVALINK_JAR_FILE = LAVALINK_DOWNLOAD_DIR / \"Lavalink.jar\"\n\nBUNDLED_APP_YML = pathlib.Path(__file__).parent / \"data\" / \"application.yml\"\nLAVALINK_APP_YML = LAVALINK_DOWNLOAD_DIR / \"application.yml\"\n\nREADY_LINE_RE = re.compile(rb\"Started Launcher in \\S+ seconds\")\nBUILD_LINE_RE = re.compile(rb\"Build:\\s+(?P<build>\\d+)\")\n\nlog = logging.getLogger(\"red.audio.manager\")\n\n\nclass ServerManager:\n\n _java_available: ClassVar[Optional[bool]] = None\n _java_version: ClassVar[Optional[Tuple[int, int]]] = None\n _up_to_date: ClassVar[Optional[bool]] = None\n\n _blacklisted_archs = []\n\n def __init__(self) -> None:\n self.ready = asyncio.Event()\n\n self._proc: Optional[asyncio.subprocess.Process] = None # pylint:disable=no-member\n self._monitor_task: Optional[asyncio.Task] = None\n self._shutdown: bool = False\n\n async def start(self) -> None:\n arch_name = platform.machine()\n if arch_name in self._blacklisted_archs:\n raise asyncio.CancelledError(\n \"You are attempting to run Lavalink audio on an unsupported machine architecture.\"\n )\n\n if self._proc is not None:\n if self._proc.returncode is None:\n raise RuntimeError(\"Internal Lavalink server is already running\")\n elif self._shutdown:\n raise RuntimeError(\"Server manager has already been used - create another one\")\n\n await self.maybe_download_jar()\n\n # Copy the application.yml across.\n # For people to customise their Lavalink server configuration they need to run it\n # externally\n shutil.copyfile(BUNDLED_APP_YML, LAVALINK_APP_YML)\n\n args = await self._get_jar_args()\n self._proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member\n *args,\n cwd=str(LAVALINK_DOWNLOAD_DIR),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT,\n )\n\n log.info(\"Internal Lavalink server started. 
PID: %s\", self._proc.pid)\n\n try:\n await asyncio.wait_for(self._wait_for_launcher(), timeout=120)\n except asyncio.TimeoutError:\n log.warning(\"Timeout occurred whilst waiting for internal Lavalink server to be ready\")\n\n self._monitor_task = asyncio.create_task(self._monitor())\n\n @classmethod\n async def _get_jar_args(cls) -> List[str]:\n java_available, java_version = await cls._has_java()\n if not java_available:\n raise RuntimeError(\"You must install Java 1.8+ for Lavalink to run.\")\n\n if java_version == (1, 8):\n extra_flags = [\"-Dsun.zip.disableMemoryMapping=true\"]\n elif java_version >= (11, 0):\n extra_flags = [\"-Djdk.tls.client.protocols=TLSv1.2\"]\n else:\n extra_flags = []\n\n return [\"java\", *extra_flags, \"-jar\", str(LAVALINK_JAR_FILE)]\n\n @classmethod\n async def _has_java(cls) -> Tuple[bool, Optional[Tuple[int, int]]]:\n if cls._java_available is not None:\n # Return cached value if we've checked this before\n return cls._java_available, cls._java_version\n java_available = shutil.which(\"java\") is not None\n if not java_available:\n cls.java_available = False\n cls.java_version = None\n else:\n cls._java_version = version = await cls._get_java_version()\n cls._java_available = (2, 0) > version >= (1, 8) or version >= (8, 0)\n return cls._java_available, cls._java_version\n\n @staticmethod\n async def _get_java_version() -> Tuple[int, int]:\n \"\"\"\n This assumes we've already checked that java exists.\n \"\"\"\n _proc: asyncio.subprocess.Process = await asyncio.create_subprocess_exec( # pylint:disable=no-member\n \"java\", \"-version\", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE\n )\n # java -version outputs to stderr\n _, err = await _proc.communicate()\n\n version_info: str = err.decode(\"utf-8\")\n # We expect the output to look something like:\n # $ java -version\n # ...\n # ... version \"MAJOR.MINOR.PATCH[_BUILD]\" ...\n # ...\n # We only care about the major and minor parts though.\n version_line_re = re.compile(\n r'version \"(?P<major>\\d+).(?P<minor>\\d+).\\d+(?:_\\d+)?(?:-[A-Za-z0-9]+)?\"'\n )\n short_version_re = re.compile(r'version \"(?P<major>\\d+)\"')\n\n lines = version_info.splitlines()\n for line in lines:\n match = version_line_re.search(line)\n short_match = short_version_re.search(line)\n if match:\n return int(match[\"major\"]), int(match[\"minor\"])\n elif short_match:\n return int(short_match[\"major\"]), 0\n\n raise RuntimeError(\n \"The output of `java -version` was unexpected. Please report this issue on Red's \"\n \"issue tracker.\"\n )\n\n async def _wait_for_launcher(self) -> None:\n log.debug(\"Waiting for Lavalink server to be ready\")\n for i in itertools.cycle(range(50)):\n line = await self._proc.stdout.readline()\n if READY_LINE_RE.search(line):\n self.ready.set()\n break\n if self._proc.returncode is not None:\n log.critical(\"Internal lavalink server exited early\")\n if i == 49:\n # Sleep after 50 lines to prevent busylooping\n await asyncio.sleep(0.1)\n\n async def _monitor(self) -> None:\n while self._proc.returncode is None:\n await asyncio.sleep(0.5)\n\n # This task hasn't been cancelled - Lavalink was shut down by something else\n log.info(\"Internal Lavalink jar shutdown unexpectedly\")\n if not self._has_java_error():\n log.info(\"Restarting internal Lavalink server\")\n await self.start()\n else:\n log.critical(\n \"Your Java is borked. 
Please find the hs_err_pid%d.log file\"\n \" in the Audio data folder and report this issue.\",\n self._proc.pid,\n )\n\n def _has_java_error(self) -> bool:\n poss_error_file = LAVALINK_DOWNLOAD_DIR / \"hs_err_pid{}.log\".format(self._proc.pid)\n return poss_error_file.exists()\n\n async def shutdown(self) -> None:\n if self._shutdown is True or self._proc is None:\n # For convenience, calling this method more than once or calling it before starting it\n # does nothing.\n return\n log.info(\"Shutting down internal Lavalink server\")\n if self._monitor_task is not None:\n self._monitor_task.cancel()\n self._proc.terminate()\n await self._proc.wait()\n self._shutdown = True\n\n @staticmethod\n async def _download_jar() -> None:\n log.info(\"Downloading Lavalink.jar...\")\n async with aiohttp.ClientSession() as session:\n async with session.get(LAVALINK_DOWNLOAD_URL) as response:\n if response.status == 404:\n # A 404 means our LAVALINK_DOWNLOAD_URL is invalid, so likely the jar version\n # hasn't been published yet\n raise LavalinkDownloadFailed(\n f\"Lavalink jar version {JAR_VERSION}_{JAR_BUILD} hasn't been published \"\n f\"yet\",\n response=response,\n should_retry=False,\n )\n elif 400 <= response.status < 600:\n # Other bad responses should be raised but we should retry just incase\n raise LavalinkDownloadFailed(response=response, should_retry=True)\n fd, path = tempfile.mkstemp()\n file = open(fd, \"wb\")\n nbytes = 0\n with tqdm(\n desc=\"Lavalink.jar\",\n total=response.content_length,\n file=sys.stdout,\n unit=\"B\",\n unit_scale=True,\n miniters=1,\n dynamic_ncols=True,\n leave=False,\n ) as progress_bar:\n try:\n chunk = await response.content.read(1024)\n while chunk:\n chunk_size = file.write(chunk)\n nbytes += chunk_size\n progress_bar.update(chunk_size)\n chunk = await response.content.read(1024)\n file.flush()\n finally:\n file.close()\n\n shutil.move(path, str(LAVALINK_JAR_FILE), copy_function=shutil.copyfile)\n\n log.info(\"Successfully downloaded Lavalink.jar (%s bytes written)\", format(nbytes, \",\"))\n\n @classmethod\n async def _is_up_to_date(cls):\n if cls._up_to_date is True:\n # Return cached value if we've checked this before\n return True\n args = await cls._get_jar_args()\n args.append(\"--version\")\n _proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member\n *args,\n cwd=str(LAVALINK_DOWNLOAD_DIR),\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.STDOUT,\n )\n stdout = (await _proc.communicate())[0]\n match = BUILD_LINE_RE.search(stdout)\n if not match:\n # Output is unexpected, suspect corrupted jarfile\n return False\n build = int(match[\"build\"])\n cls._up_to_date = build >= JAR_BUILD\n return cls._up_to_date\n\n @classmethod\n async def maybe_download_jar(cls):\n if not (LAVALINK_JAR_FILE.exists() and await cls._is_up_to_date()):\n await cls._download_jar()\n", "path": "redbot/cogs/audio/manager.py"}]} | 3,419 | 124 |
gh_patches_debug_26873 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3025 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix API handling of multi-column UNIQUE and PK columns
See https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/constraints.py`
Content:
```
1 from psycopg.errors import DuplicateTable, UniqueViolation
2 from rest_framework import serializers, status
3
4 from db.constraints import utils as constraint_utils
5 from db.identifiers import is_identifier_too_long
6 from db.constraints.base import ForeignKeyConstraint, UniqueConstraint
7
8 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
9 from mathesar.api.exceptions.validation_exceptions.exceptions import (
10 ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,
11 InvalidTableName
12 )
13 from mathesar.api.serializers.shared_serializers import (
14 MathesarPolymorphicErrorMixin,
15 ReadWritePolymorphicSerializerMappingMixin,
16 )
17 from mathesar.models.base import Column, Constraint, Table
18
19
20 class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):
21 def get_queryset(self):
22 table_id = self.context.get('table_id', None)
23 queryset = super(Table_Filtered_Column_queryset, self).get_queryset()
24 if table_id is None or not queryset:
25 return None
26 return queryset.filter(table__id=table_id)
27
28
29 class BaseConstraintSerializer(serializers.ModelSerializer):
30 name = serializers.CharField(required=False)
31 type = serializers.CharField()
32 columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)
33
34 class Meta:
35 model = Constraint
36 fields = ['id', 'name', 'type', 'columns']
37
38 def construct_constraint_obj(self, table, data):
39 columns_attnum = [column.attnum for column in data.get('columns')]
40 if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:
41 return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)
42 return None
43
44 def create(self, validated_data):
45 table = self.context['table']
46 constraint_obj = self.construct_constraint_obj(table, validated_data)
47 # Additional check is needed because we support read operations for primary key constraint,
48 # but we don't support write operations
49 if constraint_obj is None:
50 constraint_type = validated_data.get('type', None)
51 raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')
52 try:
53 constraint = table.add_constraint(constraint_obj)
54 except DuplicateTable as e:
55 raise database_api_exceptions.DuplicateTableAPIException(
56 e,
57 message='Relation with the same name already exists',
58 status_code=status.HTTP_400_BAD_REQUEST
59 )
60 except UniqueViolation as e:
61 raise database_api_exceptions.UniqueViolationAPIException(
62 e,
63 status_code=status.HTTP_400_BAD_REQUEST
64 )
65 return constraint
66
67 def validate_name(self, name):
68 if is_identifier_too_long(name):
69 raise database_api_exceptions.IdentifierTooLong(field='name')
70 return name
71
72
73 class ForeignKeyConstraintSerializer(BaseConstraintSerializer):
74 class Meta:
75 model = Constraint
76 fields = BaseConstraintSerializer.Meta.fields + [
77 'referent_columns',
78 'referent_table',
79 'onupdate',
80 'ondelete',
81 'deferrable',
82 'match'
83 ]
84
85 referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)
86 referent_table = serializers.SerializerMethodField()
87 onupdate = serializers.ChoiceField(
88 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
89 required=False,
90 allow_null=True
91 )
92 ondelete = serializers.ChoiceField(
93 choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],
94 required=False,
95 allow_null=True
96 )
97 deferrable = serializers.BooleanField(allow_null=True, required=False)
98 match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)
99
100 def get_referent_table(self, obj):
101 return obj.referent_columns[0].table.id
102
103 def construct_constraint_obj(self, table, data):
104 columns_attnum = [column.attnum for column in data.get('columns')]
105 referent_columns = data.get('referent_columns')
106 referent_columns_attnum = [column.attnum for column in referent_columns]
107 constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']
108 constraint_options = {
109 constraint_options_field: data[constraint_options_field]
110 for constraint_options_field in constraint_options_fields if constraint_options_field in data
111 }
112 return ForeignKeyConstraint(
113 data.get('name', None),
114 table.oid,
115 columns_attnum,
116 referent_columns[0].table.oid,
117 referent_columns_attnum,
118 constraint_options
119 )
120
121
122 class ConstraintSerializer(
123 ReadWritePolymorphicSerializerMappingMixin,
124 MathesarPolymorphicErrorMixin,
125 serializers.ModelSerializer
126 ):
127 class Meta:
128 model = Constraint
129 fields = '__all__'
130
131 serializers_mapping = {
132 'foreignkey': ForeignKeyConstraintSerializer,
133 'primary': BaseConstraintSerializer,
134 'unique': BaseConstraintSerializer,
135 }
136
137 def get_mapping_field(self, data):
138 if isinstance(data, Constraint):
139 constraint_type = data.type
140 else:
141 constraint_type = data.get('type', None)
142 assert constraint_type is not None
143 return constraint_type
144
145 def create(self, validated_data):
146 serializer = self.get_serializer_class(self.get_mapping_field(validated_data))
147 return serializer.create(validated_data)
148
149 def run_validation(self, data):
150 if referent_table := data.get('referent_table', None):
151 referent_table_name = Table.current_objects.get(id=referent_table).name
152 if any(
153 invalid_char in referent_table_name
154 for invalid_char in ('(', ')')
155 ):
156 raise InvalidTableName(
157 referent_table_name,
158 field='referent_table'
159 )
160 constraint_type = data.get('type', None)
161 if constraint_type not in self.serializers_mapping.keys():
162 raise UnsupportedConstraintAPIException(constraint_type=constraint_type)
163 columns = data.get('columns', None)
164 if columns == []:
165 raise ConstraintColumnEmptyAPIException(field='columns')
166 return super(ConstraintSerializer, self).run_validation(data)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py
--- a/mathesar/api/serializers/constraints.py
+++ b/mathesar/api/serializers/constraints.py
@@ -17,10 +17,17 @@
from mathesar.models.base import Column, Constraint, Table
-class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):
+class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):
+ """
+ Limits the accepted related primary key values to a specific table.
+ For example, if the PrimaryKeyRelatedField is instantiated with a
+ Column queryset, only columns in the "associated table" are
+ accepted. The "associated table" is defined by the context dict's
+ `table_id` value.
+ """
def get_queryset(self):
table_id = self.context.get('table_id', None)
- queryset = super(Table_Filtered_Column_queryset, self).get_queryset()
+ queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()
if table_id is None or not queryset:
return None
return queryset.filter(table__id=table_id)
@@ -29,7 +36,7 @@
class BaseConstraintSerializer(serializers.ModelSerializer):
name = serializers.CharField(required=False)
type = serializers.CharField()
- columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)
+ columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)
class Meta:
model = Constraint
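
A hypothetical usage sketch of the renamed field's behaviour (the helper below and its wiring are illustrative assumptions, not Mathesar's actual view code): the serializer context must carry the table being edited so the `columns` field only accepts that table's own columns.

```python
# Assumes a configured Django/DRF environment and a mathesar `Table` instance.
from mathesar.api.serializers.constraints import ConstraintSerializer

def create_unique_constraint(table, column_ids):
    serializer = ConstraintSerializer(
        data={"type": "unique", "columns": column_ids},
        context={
            "table_id": table.id,  # read by TableFilteredPrimaryKeyRelatedField.get_queryset()
            "table": table,        # read by BaseConstraintSerializer.create()
        },
    )
    serializer.is_valid(raise_exception=True)
    return serializer.save()
```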
| {"golden_diff": "diff --git a/mathesar/api/serializers/constraints.py b/mathesar/api/serializers/constraints.py\n--- a/mathesar/api/serializers/constraints.py\n+++ b/mathesar/api/serializers/constraints.py\n@@ -17,10 +17,17 @@\n from mathesar.models.base import Column, Constraint, Table\n \n \n-class Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):\n+class TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n+ \"\"\"\n+ Limits the accepted related primary key values to a specific table.\n+ For example, if the PrimaryKeyRelatedField is instantiated with a\n+ Column queryset, only columns in the \"associated table\" are\n+ accepted. The \"associated table\" is defined by the context dict's\n+ `table_id` value.\n+ \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n- queryset = super(Table_Filtered_Column_queryset, self).get_queryset()\n+ queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n@@ -29,7 +36,7 @@\n class BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n- columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)\n+ columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n \n class Meta:\n model = Constraint\n", "issue": "Fix API handling of multi-column UNIQUE and PK columns\nSee https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.\nFix API handling of multi-column UNIQUE and PK columns\nSee https://github.com/centerofci/mathesar/issues/2231#issuecomment-1380309139 for some insight.\n", "before_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass Table_Filtered_Column_queryset(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(Table_Filtered_Column_queryset, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = Table_Filtered_Column_queryset(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = 
self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n 
invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}], "after_files": [{"content": "from psycopg.errors import DuplicateTable, UniqueViolation\nfrom rest_framework import serializers, status\n\nfrom db.constraints import utils as constraint_utils\nfrom db.identifiers import is_identifier_too_long\nfrom db.constraints.base import ForeignKeyConstraint, UniqueConstraint\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ConstraintColumnEmptyAPIException, UnsupportedConstraintAPIException,\n InvalidTableName\n)\nfrom mathesar.api.serializers.shared_serializers import (\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n)\nfrom mathesar.models.base import Column, Constraint, Table\n\n\nclass TableFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Limits the accepted related primary key values to a specific table.\n For example, if the PrimaryKeyRelatedField is instantiated with a\n Column queryset, only columns in the \"associated table\" are\n accepted. The \"associated table\" is defined by the context dict's\n `table_id` value.\n \"\"\"\n def get_queryset(self):\n table_id = self.context.get('table_id', None)\n queryset = super(TableFilteredPrimaryKeyRelatedField, self).get_queryset()\n if table_id is None or not queryset:\n return None\n return queryset.filter(table__id=table_id)\n\n\nclass BaseConstraintSerializer(serializers.ModelSerializer):\n name = serializers.CharField(required=False)\n type = serializers.CharField()\n columns = TableFilteredPrimaryKeyRelatedField(queryset=Column.current_objects, many=True)\n\n class Meta:\n model = Constraint\n fields = ['id', 'name', 'type', 'columns']\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n if data.get('type') == constraint_utils.ConstraintType.UNIQUE.value:\n return UniqueConstraint(data.get('name', None), table.oid, columns_attnum)\n return None\n\n def create(self, validated_data):\n table = self.context['table']\n constraint_obj = self.construct_constraint_obj(table, validated_data)\n # Additional check is needed because we support read operations for primary key constraint,\n # but we don't support write operations\n if constraint_obj is None:\n constraint_type = validated_data.get('type', None)\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type, field='type')\n try:\n constraint = table.add_constraint(constraint_obj)\n except DuplicateTable as e:\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message='Relation with the same name already exists',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UniqueViolation as e:\n raise database_api_exceptions.UniqueViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n return constraint\n\n def validate_name(self, name):\n if is_identifier_too_long(name):\n raise 
database_api_exceptions.IdentifierTooLong(field='name')\n return name\n\n\nclass ForeignKeyConstraintSerializer(BaseConstraintSerializer):\n class Meta:\n model = Constraint\n fields = BaseConstraintSerializer.Meta.fields + [\n 'referent_columns',\n 'referent_table',\n 'onupdate',\n 'ondelete',\n 'deferrable',\n 'match'\n ]\n\n referent_columns = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), many=True)\n referent_table = serializers.SerializerMethodField()\n onupdate = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n ondelete = serializers.ChoiceField(\n choices=['RESTRICT', 'CASCADE', 'SET NULL', 'NO ACTION', 'SET DEFAULT'],\n required=False,\n allow_null=True\n )\n deferrable = serializers.BooleanField(allow_null=True, required=False)\n match = serializers.ChoiceField(choices=['SIMPLE', 'PARTIAL', 'FULL'], allow_null=True, required=False)\n\n def get_referent_table(self, obj):\n return obj.referent_columns[0].table.id\n\n def construct_constraint_obj(self, table, data):\n columns_attnum = [column.attnum for column in data.get('columns')]\n referent_columns = data.get('referent_columns')\n referent_columns_attnum = [column.attnum for column in referent_columns]\n constraint_options_fields = ['onupdate', 'ondelete', 'deferrable']\n constraint_options = {\n constraint_options_field: data[constraint_options_field]\n for constraint_options_field in constraint_options_fields if constraint_options_field in data\n }\n return ForeignKeyConstraint(\n data.get('name', None),\n table.oid,\n columns_attnum,\n referent_columns[0].table.oid,\n referent_columns_attnum,\n constraint_options\n )\n\n\nclass ConstraintSerializer(\n ReadWritePolymorphicSerializerMappingMixin,\n MathesarPolymorphicErrorMixin,\n serializers.ModelSerializer\n):\n class Meta:\n model = Constraint\n fields = '__all__'\n\n serializers_mapping = {\n 'foreignkey': ForeignKeyConstraintSerializer,\n 'primary': BaseConstraintSerializer,\n 'unique': BaseConstraintSerializer,\n }\n\n def get_mapping_field(self, data):\n if isinstance(data, Constraint):\n constraint_type = data.type\n else:\n constraint_type = data.get('type', None)\n assert constraint_type is not None\n return constraint_type\n\n def create(self, validated_data):\n serializer = self.get_serializer_class(self.get_mapping_field(validated_data))\n return serializer.create(validated_data)\n\n def run_validation(self, data):\n if referent_table := data.get('referent_table', None):\n referent_table_name = Table.current_objects.get(id=referent_table).name\n if any(\n invalid_char in referent_table_name\n for invalid_char in ('(', ')')\n ):\n raise InvalidTableName(\n referent_table_name,\n field='referent_table'\n )\n constraint_type = data.get('type', None)\n if constraint_type not in self.serializers_mapping.keys():\n raise UnsupportedConstraintAPIException(constraint_type=constraint_type)\n columns = data.get('columns', None)\n if columns == []:\n raise ConstraintColumnEmptyAPIException(field='columns')\n return super(ConstraintSerializer, self).run_validation(data)\n", "path": "mathesar/api/serializers/constraints.py"}]} | 2,042 | 331 |
gh_patches_debug_30077 | rasdani/github-patches | git_diff | chainer__chainer-1158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mix CPU mode and GPU mode in one network
I want to use CPU mode for some functions and GPU mode for others in one network.
When I use a large vocabulary in EmbedID (about >=1,000,000 words), it consumes a large amount of GPU memory. In this situation, I need to run EmbedID on the CPU and use other functions such as LSTMs on the GPU.
--- END ISSUE ---
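
A hypothetical sketch of the usage the issue asks for, assuming the Chainer 1.x Link API and the host-to-device support that this entry's patch adds to `chainer.functions.copy`; the model shape and sizes are illustrative only:

```python
import chainer
import chainer.functions as F
import chainer.links as L

class HybridRNN(chainer.Chain):
    """Keeps the huge EmbedID table in host memory and runs the LSTM on the GPU."""

    def __init__(self, n_vocab=1000000, n_units=256, gpu_device=0):
        super(HybridRNN, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),  # parameters stay on the CPU
            lstm=L.LSTM(n_units, n_units),
        )
        self.gpu_device = gpu_device
        self.lstm.to_gpu(gpu_device)            # only the LSTM weights move to the GPU

    def __call__(self, x_cpu):
        h_cpu = self.embed(x_cpu)               # embedding lookup in host memory
        h_gpu = F.copy(h_cpu, self.gpu_device)  # host -> device copy enabled by the patch
        return self.lstm(h_gpu)
```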
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/copy.py`
Content:
```
1 from chainer import cuda
2 from chainer import function
3 from chainer.utils import type_check
4
5
6 class Copy(function.Function):
7
8 """Copy an input :class:`cupy.ndarray` onto another device."""
9
10 def __init__(self, out_device):
11 self.out_device = out_device
12
13 def check_type_forward(self, in_types):
14 type_check.expect(
15 in_types.size() == 1
16 )
17
18 def forward_cpu(self, x):
19 return x[0].copy(),
20
21 def forward_gpu(self, x):
22 return cuda.copy(x[0], out_device=self.out_device),
23
24 def backward_cpu(self, x, gy):
25 return gy[0].copy(),
26
27 def backward_gpu(self, x, gy):
28 return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
29
30
31 def copy(x, dst):
32 """Copies the input variable onto the specified device.
33
34 This function copies the array of input variable onto the device specified
35 by ``dst`` if the original array is on GPU, and otherwise just copies the
36 array within host memory.
37
38 Args:
39 x (~chainer.Variable): Variable to be copied.
40 dst: Target device specifier.
41
42 Returns:
43 ~chainer.Variable: Output variable.
44
45 """
46 return Copy(dst)(x)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/copy.py b/chainer/functions/array/copy.py
--- a/chainer/functions/array/copy.py
+++ b/chainer/functions/array/copy.py
@@ -16,24 +16,45 @@
)
def forward_cpu(self, x):
- return x[0].copy(),
+ if self.out_device == -1:
+ return x[0].copy(),
+ else:
+ return cuda.to_gpu(x[0], device=self.out_device),
def forward_gpu(self, x):
- return cuda.copy(x[0], out_device=self.out_device),
+ if self.out_device == -1:
+ return cuda.to_cpu(x[0]),
+ else:
+ return cuda.copy(x[0], out_device=self.out_device),
+
+ def backward(self, inputs, grad_outputs):
+ # In this function, `grad_outputs` contains cuda arrays even when
+ # `inputs` only contains numpy arrays.
+ if isinstance(inputs[0], cuda.ndarray):
+ return self.backward_gpu(inputs, grad_outputs)
+ else:
+ return self.backward_cpu(inputs, grad_outputs)
def backward_cpu(self, x, gy):
- return gy[0].copy(),
+ if self.out_device == -1:
+ return gy[0].copy(),
+ else:
+ return cuda.to_cpu(gy[0]),
def backward_gpu(self, x, gy):
- return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
+ if self.out_device == -1:
+ return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),
+ else:
+ return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
def copy(x, dst):
"""Copies the input variable onto the specified device.
This function copies the array of input variable onto the device specified
- by ``dst`` if the original array is on GPU, and otherwise just copies the
- array within host memory.
+ by ``dst``. When ``dst == -1``, it copies the array onto the host memory.
+ This function supports copies from host to device, from device to device
+ and from device to host.
Args:
x (~chainer.Variable): Variable to be copied.
| {"golden_diff": "diff --git a/chainer/functions/array/copy.py b/chainer/functions/array/copy.py\n--- a/chainer/functions/array/copy.py\n+++ b/chainer/functions/array/copy.py\n@@ -16,24 +16,45 @@\n )\n \n def forward_cpu(self, x):\n- return x[0].copy(),\n+ if self.out_device == -1:\n+ return x[0].copy(),\n+ else:\n+ return cuda.to_gpu(x[0], device=self.out_device),\n \n def forward_gpu(self, x):\n- return cuda.copy(x[0], out_device=self.out_device),\n+ if self.out_device == -1:\n+ return cuda.to_cpu(x[0]),\n+ else:\n+ return cuda.copy(x[0], out_device=self.out_device),\n+\n+ def backward(self, inputs, grad_outputs):\n+ # In this function, `grad_outputs` contains cuda arrays even when\n+ # `inputs` only contains numpy arrays.\n+ if isinstance(inputs[0], cuda.ndarray):\n+ return self.backward_gpu(inputs, grad_outputs)\n+ else:\n+ return self.backward_cpu(inputs, grad_outputs)\n \n def backward_cpu(self, x, gy):\n- return gy[0].copy(),\n+ if self.out_device == -1:\n+ return gy[0].copy(),\n+ else:\n+ return cuda.to_cpu(gy[0]),\n \n def backward_gpu(self, x, gy):\n- return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n+ if self.out_device == -1:\n+ return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),\n+ else:\n+ return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n \n \n def copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n \n This function copies the array of input variable onto the device specified\n- by ``dst`` if the original array is on GPU, and otherwise just copies the\n- array within host memory.\n+ by ``dst``. When ``dst == -1``, it copies the array onto the host memory.\n+ This function supports copies from host to device, from device to device\n+ and from device to host.\n \n Args:\n x (~chainer.Variable): Variable to be copied.\n", "issue": "Mix CPU mode and GPU mode in one network\nI want to use CPU mode for some functions and GPU mode for others in one network.\nWhen I use a large number of vocabulary in EmbedID about >=1,000,000 words, it consumes large size of memory in GPU. 
In this situation, I need to use EmbedID in CPU, and to use other functions like LSTMs in GPU.\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Copy(function.Function):\n\n \"\"\"Copy an input :class:`cupy.ndarray` onto another device.\"\"\"\n\n def __init__(self, out_device):\n self.out_device = out_device\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1\n )\n\n def forward_cpu(self, x):\n return x[0].copy(),\n\n def forward_gpu(self, x):\n return cuda.copy(x[0], out_device=self.out_device),\n\n def backward_cpu(self, x, gy):\n return gy[0].copy(),\n\n def backward_gpu(self, x, gy):\n return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n\n\ndef copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n\n This function copies the array of input variable onto the device specified\n by ``dst`` if the original array is on GPU, and otherwise just copies the\n array within host memory.\n\n Args:\n x (~chainer.Variable): Variable to be copied.\n dst: Target device specifier.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Copy(dst)(x)\n", "path": "chainer/functions/array/copy.py"}], "after_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Copy(function.Function):\n\n \"\"\"Copy an input :class:`cupy.ndarray` onto another device.\"\"\"\n\n def __init__(self, out_device):\n self.out_device = out_device\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1\n )\n\n def forward_cpu(self, x):\n if self.out_device == -1:\n return x[0].copy(),\n else:\n return cuda.to_gpu(x[0], device=self.out_device),\n\n def forward_gpu(self, x):\n if self.out_device == -1:\n return cuda.to_cpu(x[0]),\n else:\n return cuda.copy(x[0], out_device=self.out_device),\n\n def backward(self, inputs, grad_outputs):\n # In this function, `grad_outputs` contains cuda arrays even when\n # `inputs` only contains numpy arrays.\n if isinstance(inputs[0], cuda.ndarray):\n return self.backward_gpu(inputs, grad_outputs)\n else:\n return self.backward_cpu(inputs, grad_outputs)\n\n def backward_cpu(self, x, gy):\n if self.out_device == -1:\n return gy[0].copy(),\n else:\n return cuda.to_cpu(gy[0]),\n\n def backward_gpu(self, x, gy):\n if self.out_device == -1:\n return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),\n else:\n return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),\n\n\ndef copy(x, dst):\n \"\"\"Copies the input variable onto the specified device.\n\n This function copies the array of input variable onto the device specified\n by ``dst``. When ``dst == -1``, it copies the array onto the host memory.\n This function supports copies from host to device, from device to device\n and from device to host.\n\n Args:\n x (~chainer.Variable): Variable to be copied.\n dst: Target device specifier.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Copy(dst)(x)\n", "path": "chainer/functions/array/copy.py"}]} | 717 | 515 |
gh_patches_debug_2915 | rasdani/github-patches | git_diff | pydantic__pydantic-6364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pydantic.v1.parse_obj_as internally uses pydantic.main.create_model instead of pydantic.v1.main.create_model
### Initial Checks
- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent
### Description
I was trying to migrate my codebase from V1 to V2 (mostly by replacing `import pydantic` with `import pydantic.v1`) and noticed that `pydantic.v1.parse_obj_as` was not working as intended and was leading to the following error:
```
Traceback (most recent call last):
File "/Users/sharathhuddar/workspace/django-rest-api/core/tests/test_types.py", line 177, in test_non_https_url
parse_obj_as(HttpsUrl, url)
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py", line 37, in parse_obj_as
model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py", line 30, in _get_parsing_type
return create_model(type_name, __root__=(type_, ...))
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/main.py", line 1319, in create_model
return meta(__model_name, resolved_bases, namespace, __pydantic_reset_parent_namespace__=False, **kwds)
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py", line 96, in __new__
namespace, config_wrapper.ignored_types, class_vars, base_field_names
File "/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py", line 279, in inspect_namespace
raise TypeError("To define root models, use `pydantic.RootModel` rather than a field called '__root__'")
TypeError: To define root models, use `pydantic.RootModel` rather than a field called '__root__'
```
On inspecting the source code, I noticed that `parse_obj_as` calls `_get_parsing_type` which in turn calls `pydantic.main.create_model` instead of `pydantic.v1.main.create_model`
The issue gets resolved on updating the import statement in `pydantic.v1.tools._get_parsing_type: 24` from `from pydantic.main import create_model` to `from pydantic.v1.main import create_model`
### Example Code
_No response_
### Python, Pydantic & OS Version
```Text
python -c "import pydantic.version; print(pydantic.version.version_info())"
pydantic version: 2.0
pydantic-core version: 2.0.1 release build profile
install path: /Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic
python version: 3.7.12 (default, Nov 22 2022, 14:45:00) [Clang 13.1.6 (clang-1316.0.21.2.5)]
platform: Darwin-22.2.0-x86_64-i386-64bit
optional deps. installed: ['email-validator', 'typing-extensions']
```
Selected Assignee: @lig
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/tools.py`
Content:
```
1 import json
2 from functools import lru_cache
3 from pathlib import Path
4 from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union
5
6 from .parse import Protocol, load_file, load_str_bytes
7 from .types import StrBytes
8 from .typing import display_as_type
9
10 __all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')
11
12 NameFactory = Union[str, Callable[[Type[Any]], str]]
13
14 if TYPE_CHECKING:
15 from .typing import DictStrAny
16
17
18 def _generate_parsing_type_name(type_: Any) -> str:
19 return f'ParsingModel[{display_as_type(type_)}]'
20
21
22 @lru_cache(maxsize=2048)
23 def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
24 from pydantic.main import create_model
25
26 if type_name is None:
27 type_name = _generate_parsing_type_name
28 if not isinstance(type_name, str):
29 type_name = type_name(type_)
30 return create_model(type_name, __root__=(type_, ...))
31
32
33 T = TypeVar('T')
34
35
36 def parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:
37 model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]
38 return model_type(__root__=obj).__root__
39
40
41 def parse_file_as(
42 type_: Type[T],
43 path: Union[str, Path],
44 *,
45 content_type: str = None,
46 encoding: str = 'utf8',
47 proto: Protocol = None,
48 allow_pickle: bool = False,
49 json_loads: Callable[[str], Any] = json.loads,
50 type_name: Optional[NameFactory] = None,
51 ) -> T:
52 obj = load_file(
53 path,
54 proto=proto,
55 content_type=content_type,
56 encoding=encoding,
57 allow_pickle=allow_pickle,
58 json_loads=json_loads,
59 )
60 return parse_obj_as(type_, obj, type_name=type_name)
61
62
63 def parse_raw_as(
64 type_: Type[T],
65 b: StrBytes,
66 *,
67 content_type: str = None,
68 encoding: str = 'utf8',
69 proto: Protocol = None,
70 allow_pickle: bool = False,
71 json_loads: Callable[[str], Any] = json.loads,
72 type_name: Optional[NameFactory] = None,
73 ) -> T:
74 obj = load_str_bytes(
75 b,
76 proto=proto,
77 content_type=content_type,
78 encoding=encoding,
79 allow_pickle=allow_pickle,
80 json_loads=json_loads,
81 )
82 return parse_obj_as(type_, obj, type_name=type_name)
83
84
85 def schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':
86 """Generate a JSON schema (as dict) for the passed model or dynamically generated one"""
87 return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)
88
89
90 def schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:
91 """Generate a JSON schema (as JSON) for the passed model or dynamically generated one"""
92 return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/tools.py b/pydantic/tools.py
--- a/pydantic/tools.py
+++ b/pydantic/tools.py
@@ -21,7 +21,7 @@
@lru_cache(maxsize=2048)
def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:
- from pydantic.main import create_model
+ from .main import create_model
if type_name is None:
type_name = _generate_parsing_type_name
| {"golden_diff": "diff --git a/pydantic/tools.py b/pydantic/tools.py\n--- a/pydantic/tools.py\n+++ b/pydantic/tools.py\n@@ -21,7 +21,7 @@\n \n @lru_cache(maxsize=2048)\n def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:\n- from pydantic.main import create_model\n+ from .main import create_model\n \n if type_name is None:\n type_name = _generate_parsing_type_name\n", "issue": "pydantic.v1.parse_obj_as internally uses pydantic.main.create_model instead of pydantic.v1.main.create_model\n### Initial Checks\r\n\r\n- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent\r\n\r\n### Description\r\n\r\nI was trying to migrate my codebase from V1 to V2 (mostly by replacing `import pydantic` with `import pydantic.v1`) and noticed that `pydantic.v1.parse_obj_as` was not working as intended and was leading to the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/core/tests/test_types.py\", line 177, in test_non_https_url\r\n parse_obj_as(HttpsUrl, url)\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py\", line 37, in parse_obj_as\r\n model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/v1/tools.py\", line 30, in _get_parsing_type\r\n return create_model(type_name, __root__=(type_, ...))\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/main.py\", line 1319, in create_model\r\n return meta(__model_name, resolved_bases, namespace, __pydantic_reset_parent_namespace__=False, **kwds)\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py\", line 96, in __new__\r\n namespace, config_wrapper.ignored_types, class_vars, base_field_names\r\n File \"/Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic/_internal/_model_construction.py\", line 279, in inspect_namespace\r\n raise TypeError(\"To define root models, use `pydantic.RootModel` rather than a field called '__root__'\")\r\nTypeError: To define root models, use `pydantic.RootModel` rather than a field called '__root__'\r\n```\r\n\r\nOn inspecting the source code, I noticed that `parse_obj_as` calls `_get_parsing_type` which inturn calls `pydantic.main.create_model` instead of `pydantic.v1.main.create_model`\r\n\r\n\r\nThe issue gets resolved on updating the import statement in `pydantic.v1.tools._get_parsing_type: 24` from `from pydantic.main import create_model` to `from pydantic.v1.main import create_model`\r\n\r\n### Example Code\r\n\r\n_No response_\r\n\r\n### Python, Pydantic & OS Version\r\n\r\n```Text\r\npython -c \"import pydantic.version; print(pydantic.version.version_info())\"\r\n\r\n pydantic version: 2.0\r\n pydantic-core version: 2.0.1 release build profile\r\n install path: /Users/sharathhuddar/workspace/django-rest-api/django-rest-api-3.7/lib/python3.7/site-packages/pydantic\r\n python version: 3.7.12 (default, Nov 22 2022, 14:45:00) [Clang 13.1.6 (clang-1316.0.21.2.5)]\r\n platform: Darwin-22.2.0-x86_64-i386-64bit\r\n optional deps. 
installed: ['email-validator', 'typing-extensions']\r\n```\r\n\r\n\r\nSelected Assignee: @lig\n", "before_files": [{"content": "import json\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union\n\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .types import StrBytes\nfrom .typing import display_as_type\n\n__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')\n\nNameFactory = Union[str, Callable[[Type[Any]], str]]\n\nif TYPE_CHECKING:\n from .typing import DictStrAny\n\n\ndef _generate_parsing_type_name(type_: Any) -> str:\n return f'ParsingModel[{display_as_type(type_)}]'\n\n\n@lru_cache(maxsize=2048)\ndef _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:\n from pydantic.main import create_model\n\n if type_name is None:\n type_name = _generate_parsing_type_name\n if not isinstance(type_name, str):\n type_name = type_name(type_)\n return create_model(type_name, __root__=(type_, ...))\n\n\nT = TypeVar('T')\n\n\ndef parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:\n model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]\n return model_type(__root__=obj).__root__\n\n\ndef parse_file_as(\n type_: Type[T],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef parse_raw_as(\n type_: Type[T],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':\n \"\"\"Generate a JSON schema (as dict) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)\n\n\ndef schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:\n \"\"\"Generate a JSON schema (as JSON) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)\n", "path": "pydantic/tools.py"}], "after_files": [{"content": "import json\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union\n\nfrom .parse import Protocol, load_file, load_str_bytes\nfrom .types import StrBytes\nfrom .typing import display_as_type\n\n__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of')\n\nNameFactory = Union[str, Callable[[Type[Any]], str]]\n\nif TYPE_CHECKING:\n from .typing import DictStrAny\n\n\ndef _generate_parsing_type_name(type_: Any) -> str:\n return f'ParsingModel[{display_as_type(type_)}]'\n\n\n@lru_cache(maxsize=2048)\ndef 
_get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any:\n from .main import create_model\n\n if type_name is None:\n type_name = _generate_parsing_type_name\n if not isinstance(type_name, str):\n type_name = type_name(type_)\n return create_model(type_name, __root__=(type_, ...))\n\n\nT = TypeVar('T')\n\n\ndef parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T:\n model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type]\n return model_type(__root__=obj).__root__\n\n\ndef parse_file_as(\n type_: Type[T],\n path: Union[str, Path],\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_file(\n path,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef parse_raw_as(\n type_: Type[T],\n b: StrBytes,\n *,\n content_type: str = None,\n encoding: str = 'utf8',\n proto: Protocol = None,\n allow_pickle: bool = False,\n json_loads: Callable[[str], Any] = json.loads,\n type_name: Optional[NameFactory] = None,\n) -> T:\n obj = load_str_bytes(\n b,\n proto=proto,\n content_type=content_type,\n encoding=encoding,\n allow_pickle=allow_pickle,\n json_loads=json_loads,\n )\n return parse_obj_as(type_, obj, type_name=type_name)\n\n\ndef schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny':\n \"\"\"Generate a JSON schema (as dict) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs)\n\n\ndef schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str:\n \"\"\"Generate a JSON schema (as JSON) for the passed model or dynamically generated one\"\"\"\n return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs)\n", "path": "pydantic/tools.py"}]} | 2,059 | 120 |
gh_patches_debug_14117 | rasdani/github-patches | git_diff | tensorflow__tensor2tensor-1247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot download MRPC data
### Description
I get `UnicodeDecodeError` when trying to generate the "MSR Paraphrase Corpus" data. It happens when using either `t2t-datagen` or `t2t-trainer`.
### Environment information
```
OS: macOS 10.13.4
$ pip freeze | grep tensor
mesh-tensorflow==0.0.4
tensor2tensor==1.11.0
tensorboard==1.12.0
tensorflow==1.12.0
tensorflow-metadata==0.9.0
tensorflow-probability==0.5.0
$ python -V
Python 3.6.4
```
### For bugs: reproduction and error logs
```
# Steps to reproduce:
$ t2t-datagen \
--data_dir=~/t2t_data/msr_paraphrase_corpus \
--tmp_dir=/tmp/t2t_tmp \
--problem=msr_paraphrase_corpus
```
```
# Error logs:
INFO:tensorflow:Generated 8152 Examples
INFO:tensorflow:Found vocab file: /Users/ywkim/t2t_data/msr_paraphrase_corpus/vocab.msr_paraphrase_corpus.8192.subwords
Traceback (most recent call last):
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/bin/t2t-datagen", line 28, in <module>
tf.app.run()
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/bin/t2t-datagen", line 23, in main
t2t_datagen.main(argv)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/bin/t2t_datagen.py", line 198, in main
generate_data_for_registered_problem(problem)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/bin/t2t_datagen.py", line 260, in generate_data_for_registered_problem
problem.generate_data(data_dir, tmp_dir, task_id)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/text_problems.py", line 306, in generate_data
self.generate_encoded_samples(data_dir, tmp_dir, split)), paths)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py", line 165, in generate_files
for case in generator:
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/text_problems.py", line 542, in generate_encoded_samples
for sample in generator:
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/mrpc.py", line 114, in generate_samples
for row in tf.gfile.Open(os.path.join(mrpc_dir, "dev_ids.tsv")):
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 220, in __next__
return self.next()
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 214, in next
retval = self.readline()
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 184, in readline
return self._prepare_value(self._read_buf.ReadLineAsString())
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 100, in _prepare_value
return compat.as_str_any(val)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/util/compat.py", line 107, in as_str_any
return as_str(value)
File "/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/util/compat.py", line 80, in as_text
return bytes_or_text.decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa2 in position 12: invalid start byte
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensor2tensor/data_generators/mrpc.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 The Tensor2Tensor Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Data generators for the MSR Paraphrase Corpus."""
17
18 from __future__ import absolute_import
19 from __future__ import division
20 from __future__ import print_function
21
22 import os
23 import six
24 from tensor2tensor.data_generators import generator_utils
25 from tensor2tensor.data_generators import problem
26 from tensor2tensor.data_generators import text_encoder
27 from tensor2tensor.data_generators import text_problems
28 from tensor2tensor.utils import registry
29 import tensorflow as tf
30
31 EOS = text_encoder.EOS
32
33
34 @registry.register_problem
35 class MSRParaphraseCorpus(text_problems.TextConcat2ClassProblem):
36 """MSR Paraphrase Identification problems."""
37
38 # Link to data from GLUE: https://gluebenchmark.com/tasks
39 DEV_IDS = ("https://firebasestorage.googleapis.com/v0/b/"
40 "mtl-sentence-representations.appspot.com/o/"
41 "data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-"
42 "4bd7-99a5-5e00222e0faf")
43 MRPC_TRAIN = ("https://s3.amazonaws.com/senteval/senteval_data/"
44 "msr_paraphrase_train.txt")
45 MRPC_TEST = ("https://s3.amazonaws.com/senteval/senteval_data/"
46 "msr_paraphrase_test.txt")
47 DATA_DIR = "MRPC"
48
49 @property
50 def is_generate_per_split(self):
51 return True
52
53 @property
54 def dataset_splits(self):
55 return [{
56 "split": problem.DatasetSplit.TRAIN,
57 "shards": 10,
58 }, {
59 "split": problem.DatasetSplit.EVAL,
60 "shards": 1,
61 }]
62
63 @property
64 def approx_vocab_size(self):
65 return 2**13 # 8k vocab suffices for this small dataset.
66
67 @property
68 def num_classes(self):
69 return 2
70
71 def class_labels(self, data_dir):
72 del data_dir
73 return ["not_paraphrase", "paraphrase"]
74
75 def _maybe_download_corpora(self, tmp_dir):
76 mrpc_dir = os.path.join(tmp_dir, self.DATA_DIR)
77 tf.gfile.MakeDirs(mrpc_dir)
78 mrpc_train_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_train.txt")
79 mrpc_test_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_test.txt")
80 mrpc_dev_ids_finalpath = os.path.join(mrpc_dir, "dev_ids.tsv")
81
82 def download_file(tdir, filepath, url):
83 if not tf.gfile.Exists(filepath):
84 generator_utils.maybe_download(tdir, filepath, url)
85
86 download_file(mrpc_dir, mrpc_train_finalpath, self.MRPC_TRAIN)
87 download_file(mrpc_dir, mrpc_test_finalpath, self.MRPC_TEST)
88 download_file(mrpc_dir, mrpc_dev_ids_finalpath, self.DEV_IDS)
89
90 return mrpc_dir
91
92 def example_generator(self, filename, dev_ids):
93 for idx, line in enumerate(tf.gfile.Open(filename, "rb")):
94 if idx == 0: continue # skip header
95 if six.PY2:
96 line = unicode(line.strip(), "utf-8")
97 else:
98 line = line.strip().decode("utf-8")
99 l, id1, id2, s1, s2 = line.split("\t")
100 if dev_ids and [id1, id2] not in dev_ids:
101 continue
102 inputs = [[s1, s2], [s2, s1]]
103 for inp in inputs:
104 yield {
105 "inputs": inp,
106 "label": int(l)
107 }
108
109 def generate_samples(self, data_dir, tmp_dir, dataset_split):
110 mrpc_dir = self._maybe_download_corpora(tmp_dir)
111 filesplit = "msr_paraphrase_train.txt"
112 dev_ids = []
113 if dataset_split != problem.DatasetSplit.TRAIN:
114 for row in tf.gfile.Open(os.path.join(mrpc_dir, "dev_ids.tsv")):
115 dev_ids.append(row.strip().split("\t"))
116
117 filename = os.path.join(mrpc_dir, filesplit)
118 for example in self.example_generator(filename, dev_ids):
119 yield example
120
121
122 @registry.register_problem
123 class MSRParaphraseCorpusCharacters(MSRParaphraseCorpus):
124 """MSR Paraphrase Identification problems, character level"""
125
126 @property
127 def vocab_type(self):
128 return text_problems.VocabType.CHARACTER
129
130 def global_task_id(self):
131 return problem.TaskID.EN_SIM
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensor2tensor/data_generators/mrpc.py b/tensor2tensor/data_generators/mrpc.py
--- a/tensor2tensor/data_generators/mrpc.py
+++ b/tensor2tensor/data_generators/mrpc.py
@@ -38,8 +38,8 @@
# Link to data from GLUE: https://gluebenchmark.com/tasks
DEV_IDS = ("https://firebasestorage.googleapis.com/v0/b/"
"mtl-sentence-representations.appspot.com/o/"
- "data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-"
- "4bd7-99a5-5e00222e0faf")
+ "data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-"
+ "48f4-b431-7480817f1adc")
MRPC_TRAIN = ("https://s3.amazonaws.com/senteval/senteval_data/"
"msr_paraphrase_train.txt")
MRPC_TEST = ("https://s3.amazonaws.com/senteval/senteval_data/"
| {"golden_diff": "diff --git a/tensor2tensor/data_generators/mrpc.py b/tensor2tensor/data_generators/mrpc.py\n--- a/tensor2tensor/data_generators/mrpc.py\n+++ b/tensor2tensor/data_generators/mrpc.py\n@@ -38,8 +38,8 @@\n # Link to data from GLUE: https://gluebenchmark.com/tasks\n DEV_IDS = (\"https://firebasestorage.googleapis.com/v0/b/\"\n \"mtl-sentence-representations.appspot.com/o/\"\n- \"data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-\"\n- \"4bd7-99a5-5e00222e0faf\")\n+ \"data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-\"\n+ \"48f4-b431-7480817f1adc\")\n MRPC_TRAIN = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n \"msr_paraphrase_train.txt\")\n MRPC_TEST = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n", "issue": "Cannot download MRPC data\n### Description\r\n\r\nI get `UnicodeDecodeError` when trying to generate the \"MSR Paraphrase Corpus\" data. It happens when using either `t2t-datagen` or `t2t-trainer`.\r\n\r\n### Environment information\r\n\r\n```\r\nOS: macOS 10.13.4\r\n\r\n$ pip freeze | grep tensor\r\nmesh-tensorflow==0.0.4\r\ntensor2tensor==1.11.0\r\ntensorboard==1.12.0\r\ntensorflow==1.12.0\r\ntensorflow-metadata==0.9.0\r\ntensorflow-probability==0.5.0\r\n\r\n$ python -V\r\nPython 3.6.4\r\n```\r\n\r\n### For bugs: reproduction and error logs\r\n\r\n```\r\n# Steps to reproduce:\r\n$ t2t-datagen \\ \r\n --data_dir=~/t2t_data/msr_paraphrase_corpus \\\r\n --tmp_dir=/tmp/t2t_tmp \\\r\n --problem=msr_paraphrase_corpus\r\n```\r\n\r\n```\r\n# Error logs:\r\nINFO:tensorflow:Generated 8152 Examples\r\nINFO:tensorflow:Found vocab file: /Users/ywkim/t2t_data/msr_paraphrase_corpus/vocab.msr_paraphrase_corpus.8192.subwords \r\nTraceback (most recent call last):\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/bin/t2t-datagen\", line 28, in <module> \r\n tf.app.run()\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/platform/app.py\", line 125, in run \r\n _sys.exit(main(argv))\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/bin/t2t-datagen\", line 23, in main \r\n t2t_datagen.main(argv)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/bin/t2t_datagen.py\", line 198, in main \r\n generate_data_for_registered_problem(problem)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/bin/t2t_datagen.py\", line 260, in generate_data_for_registered_problem\r\n problem.generate_data(data_dir, tmp_dir, task_id)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/text_problems.py\", line 306, in generate_data \r\n self.generate_encoded_samples(data_dir, tmp_dir, split)), paths)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py\", line 165, in generate_files\r\n for case in generator:\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/text_problems.py\", line 542, in generate_encoded_samples\r\n for sample in generator:\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensor2tensor/data_generators/mrpc.py\", line 114, in generate_samples \r\n for row in tf.gfile.Open(os.path.join(mrpc_dir, \"dev_ids.tsv\")):\r\n File 
\"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py\", line 220, in __next__ \r\n return self.next()\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py\", line 214, in next \r\n retval = self.readline()\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py\", line 184, in readline \r\n return self._prepare_value(self._read_buf.ReadLineAsString())\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py\", line 100, in _prepare_value \r\n return compat.as_str_any(val)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/util/compat.py\", line 107, in as_str_any \r\n return as_str(value)\r\n File \"/Users/ywkim/.local/share/virtualenvs/rally-f4OA2-t-/lib/python3.6/site-packages/tensorflow/python/util/compat.py\", line 80, in as_text \r\n return bytes_or_text.decode(encoding)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xa2 in position 12: invalid start byte\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the MSR Paraphrase Corpus.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\nEOS = text_encoder.EOS\n\n\[email protected]_problem\nclass MSRParaphraseCorpus(text_problems.TextConcat2ClassProblem):\n \"\"\"MSR Paraphrase Identification problems.\"\"\"\n\n # Link to data from GLUE: https://gluebenchmark.com/tasks\n DEV_IDS = (\"https://firebasestorage.googleapis.com/v0/b/\"\n \"mtl-sentence-representations.appspot.com/o/\"\n \"data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-\"\n \"4bd7-99a5-5e00222e0faf\")\n MRPC_TRAIN = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n \"msr_paraphrase_train.txt\")\n MRPC_TEST = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n \"msr_paraphrase_test.txt\")\n DATA_DIR = \"MRPC\"\n\n @property\n def is_generate_per_split(self):\n return True\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 10,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n @property\n def approx_vocab_size(self):\n return 2**13 # 8k vocab suffices for this small dataset.\n\n @property\n def num_classes(self):\n return 2\n\n def class_labels(self, data_dir):\n del data_dir\n return [\"not_paraphrase\", \"paraphrase\"]\n\n def 
_maybe_download_corpora(self, tmp_dir):\n mrpc_dir = os.path.join(tmp_dir, self.DATA_DIR)\n tf.gfile.MakeDirs(mrpc_dir)\n mrpc_train_finalpath = os.path.join(mrpc_dir, \"msr_paraphrase_train.txt\")\n mrpc_test_finalpath = os.path.join(mrpc_dir, \"msr_paraphrase_test.txt\")\n mrpc_dev_ids_finalpath = os.path.join(mrpc_dir, \"dev_ids.tsv\")\n\n def download_file(tdir, filepath, url):\n if not tf.gfile.Exists(filepath):\n generator_utils.maybe_download(tdir, filepath, url)\n\n download_file(mrpc_dir, mrpc_train_finalpath, self.MRPC_TRAIN)\n download_file(mrpc_dir, mrpc_test_finalpath, self.MRPC_TEST)\n download_file(mrpc_dir, mrpc_dev_ids_finalpath, self.DEV_IDS)\n\n return mrpc_dir\n\n def example_generator(self, filename, dev_ids):\n for idx, line in enumerate(tf.gfile.Open(filename, \"rb\")):\n if idx == 0: continue # skip header\n if six.PY2:\n line = unicode(line.strip(), \"utf-8\")\n else:\n line = line.strip().decode(\"utf-8\")\n l, id1, id2, s1, s2 = line.split(\"\\t\")\n if dev_ids and [id1, id2] not in dev_ids:\n continue\n inputs = [[s1, s2], [s2, s1]]\n for inp in inputs:\n yield {\n \"inputs\": inp,\n \"label\": int(l)\n }\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n mrpc_dir = self._maybe_download_corpora(tmp_dir)\n filesplit = \"msr_paraphrase_train.txt\"\n dev_ids = []\n if dataset_split != problem.DatasetSplit.TRAIN:\n for row in tf.gfile.Open(os.path.join(mrpc_dir, \"dev_ids.tsv\")):\n dev_ids.append(row.strip().split(\"\\t\"))\n\n filename = os.path.join(mrpc_dir, filesplit)\n for example in self.example_generator(filename, dev_ids):\n yield example\n\n\[email protected]_problem\nclass MSRParaphraseCorpusCharacters(MSRParaphraseCorpus):\n \"\"\"MSR Paraphrase Identification problems, character level\"\"\"\n\n @property\n def vocab_type(self):\n return text_problems.VocabType.CHARACTER\n\n def global_task_id(self):\n return problem.TaskID.EN_SIM\n", "path": "tensor2tensor/data_generators/mrpc.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the MSR Paraphrase Corpus.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\nEOS = text_encoder.EOS\n\n\[email protected]_problem\nclass MSRParaphraseCorpus(text_problems.TextConcat2ClassProblem):\n \"\"\"MSR Paraphrase Identification problems.\"\"\"\n\n # Link to data from GLUE: https://gluebenchmark.com/tasks\n DEV_IDS = (\"https://firebasestorage.googleapis.com/v0/b/\"\n \"mtl-sentence-representations.appspot.com/o/\"\n \"data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-\"\n \"48f4-b431-7480817f1adc\")\n 
MRPC_TRAIN = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n \"msr_paraphrase_train.txt\")\n MRPC_TEST = (\"https://s3.amazonaws.com/senteval/senteval_data/\"\n \"msr_paraphrase_test.txt\")\n DATA_DIR = \"MRPC\"\n\n @property\n def is_generate_per_split(self):\n return True\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 10,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n @property\n def approx_vocab_size(self):\n return 2**13 # 8k vocab suffices for this small dataset.\n\n @property\n def num_classes(self):\n return 2\n\n def class_labels(self, data_dir):\n del data_dir\n return [\"not_paraphrase\", \"paraphrase\"]\n\n def _maybe_download_corpora(self, tmp_dir):\n mrpc_dir = os.path.join(tmp_dir, self.DATA_DIR)\n tf.gfile.MakeDirs(mrpc_dir)\n mrpc_train_finalpath = os.path.join(mrpc_dir, \"msr_paraphrase_train.txt\")\n mrpc_test_finalpath = os.path.join(mrpc_dir, \"msr_paraphrase_test.txt\")\n mrpc_dev_ids_finalpath = os.path.join(mrpc_dir, \"dev_ids.tsv\")\n\n def download_file(tdir, filepath, url):\n if not tf.gfile.Exists(filepath):\n generator_utils.maybe_download(tdir, filepath, url)\n\n download_file(mrpc_dir, mrpc_train_finalpath, self.MRPC_TRAIN)\n download_file(mrpc_dir, mrpc_test_finalpath, self.MRPC_TEST)\n download_file(mrpc_dir, mrpc_dev_ids_finalpath, self.DEV_IDS)\n\n return mrpc_dir\n\n def example_generator(self, filename, dev_ids):\n for idx, line in enumerate(tf.gfile.Open(filename, \"rb\")):\n if idx == 0: continue # skip header\n if six.PY2:\n line = unicode(line.strip(), \"utf-8\")\n else:\n line = line.strip().decode(\"utf-8\")\n l, id1, id2, s1, s2 = line.split(\"\\t\")\n if dev_ids and [id1, id2] not in dev_ids:\n continue\n inputs = [[s1, s2], [s2, s1]]\n for inp in inputs:\n yield {\n \"inputs\": inp,\n \"label\": int(l)\n }\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n mrpc_dir = self._maybe_download_corpora(tmp_dir)\n filesplit = \"msr_paraphrase_train.txt\"\n dev_ids = []\n if dataset_split != problem.DatasetSplit.TRAIN:\n for row in tf.gfile.Open(os.path.join(mrpc_dir, \"dev_ids.tsv\")):\n dev_ids.append(row.strip().split(\"\\t\"))\n\n filename = os.path.join(mrpc_dir, filesplit)\n for example in self.example_generator(filename, dev_ids):\n yield example\n\n\[email protected]_problem\nclass MSRParaphraseCorpusCharacters(MSRParaphraseCorpus):\n \"\"\"MSR Paraphrase Identification problems, character level\"\"\"\n\n @property\n def vocab_type(self):\n return text_problems.VocabType.CHARACTER\n\n def global_task_id(self):\n return problem.TaskID.EN_SIM\n", "path": "tensor2tensor/data_generators/mrpc.py"}]} | 2,883 | 269 |
gh_patches_debug_21586 | rasdani/github-patches | git_diff | CTFd__CTFd-278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Searching teams from the admin panel
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/teams.py`
Content:
```
1 from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
2 from CTFd.utils import admins_only, is_admin, cache
3 from CTFd.models import db, Teams, Solves, Awards, Unlocks, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError
4 from passlib.hash import bcrypt_sha256
5 from sqlalchemy.sql import not_
6
7 from CTFd import utils
8
9 admin_teams = Blueprint('admin_teams', __name__)
10
11
12 @admin_teams.route('/admin/teams', defaults={'page': '1'})
13 @admin_teams.route('/admin/teams/<int:page>')
14 @admins_only
15 def admin_teams_view(page):
16 page = abs(int(page))
17 results_per_page = 50
18 page_start = results_per_page * (page - 1)
19 page_end = results_per_page * (page - 1) + results_per_page
20
21 teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()
22 count = db.session.query(db.func.count(Teams.id)).first()[0]
23 pages = int(count / results_per_page) + (count % results_per_page > 0)
24 return render_template('admin/teams.html', teams=teams, pages=pages, curr_page=page)
25
26
27 @admin_teams.route('/admin/team/<int:teamid>', methods=['GET', 'POST'])
28 @admins_only
29 def admin_team(teamid):
30 user = Teams.query.filter_by(id=teamid).first_or_404()
31
32 if request.method == 'GET':
33 solves = Solves.query.filter_by(teamid=teamid).all()
34 solve_ids = [s.chalid for s in solves]
35 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
36 last_seen = db.func.max(Tracking.date).label('last_seen')
37 addrs = db.session.query(Tracking.ip, last_seen) \
38 .filter_by(team=teamid) \
39 .group_by(Tracking.ip) \
40 .order_by(last_seen.desc()).all()
41 wrong_keys = WrongKeys.query.filter_by(teamid=teamid).order_by(WrongKeys.date.asc()).all()
42 awards = Awards.query.filter_by(teamid=teamid).order_by(Awards.date.asc()).all()
43 score = user.score(admin=True)
44 place = user.place(admin=True)
45 return render_template('admin/team.html', solves=solves, team=user, addrs=addrs, score=score, missing=missing,
46 place=place, wrong_keys=wrong_keys, awards=awards)
47 elif request.method == 'POST':
48 admin_user = request.form.get('admin', None)
49 if admin_user:
50 admin_user = True if admin_user == 'true' else False
51 user.admin = admin_user
52 # Set user.banned to hide admins from scoreboard
53 user.banned = admin_user
54 db.session.commit()
55 db.session.close()
56 return jsonify({'data': ['success']})
57
58 verified = request.form.get('verified', None)
59 if verified:
60 verified = True if verified == 'true' else False
61 user.verified = verified
62 db.session.commit()
63 db.session.close()
64 return jsonify({'data': ['success']})
65
66 name = request.form.get('name', None)
67 password = request.form.get('password', None)
68 email = request.form.get('email', None)
69 website = request.form.get('website', None)
70 affiliation = request.form.get('affiliation', None)
71 country = request.form.get('country', None)
72
73 errors = []
74
75 name_used = Teams.query.filter(Teams.name == name).first()
76 if name_used and int(name_used.id) != int(teamid):
77 errors.append('That name is taken')
78
79 email_used = Teams.query.filter(Teams.email == email).first()
80 if email_used and int(email_used.id) != int(teamid):
81 errors.append('That email is taken')
82
83 if errors:
84 db.session.close()
85 return jsonify({'data': errors})
86 else:
87 user.name = name
88 if email:
89 user.email = email
90 if password:
91 user.password = bcrypt_sha256.encrypt(password)
92 user.website = website
93 user.affiliation = affiliation
94 user.country = country
95 db.session.commit()
96 db.session.close()
97 return jsonify({'data': ['success']})
98
99
100 @admin_teams.route('/admin/team/<int:teamid>/mail', methods=['POST'])
101 @admins_only
102 def email_user(teamid):
103 message = request.form.get('msg', None)
104 team = Teams.query.filter(Teams.id == teamid).first()
105 if message and team:
106 if utils.sendmail(team.email, message):
107 return '1'
108 return '0'
109
110
111 @admin_teams.route('/admin/team/<int:teamid>/ban', methods=['POST'])
112 @admins_only
113 def ban(teamid):
114 user = Teams.query.filter_by(id=teamid).first_or_404()
115 user.banned = True
116 db.session.commit()
117 db.session.close()
118 return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))
119
120
121 @admin_teams.route('/admin/team/<int:teamid>/unban', methods=['POST'])
122 @admins_only
123 def unban(teamid):
124 user = Teams.query.filter_by(id=teamid).first_or_404()
125 user.banned = False
126 db.session.commit()
127 db.session.close()
128 return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))
129
130
131 @admin_teams.route('/admin/team/<int:teamid>/delete', methods=['POST'])
132 @admins_only
133 def delete_team(teamid):
134 try:
135 Unlocks.query.filter_by(teamid=teamid).delete()
136 Awards.query.filter_by(teamid=teamid).delete()
137 WrongKeys.query.filter_by(teamid=teamid).delete()
138 Solves.query.filter_by(teamid=teamid).delete()
139 Tracking.query.filter_by(team=teamid).delete()
140 Teams.query.filter_by(id=teamid).delete()
141 db.session.commit()
142 db.session.close()
143 except DatabaseError:
144 return '0'
145 else:
146 return '1'
147
148
149 @admin_teams.route('/admin/solves/<teamid>', methods=['GET'])
150 @admins_only
151 def admin_solves(teamid="all"):
152 if teamid == "all":
153 solves = Solves.query.all()
154 else:
155 solves = Solves.query.filter_by(teamid=teamid).all()
156 awards = Awards.query.filter_by(teamid=teamid).all()
157 db.session.close()
158 json_data = {'solves': []}
159 for x in solves:
160 json_data['solves'].append({
161 'id': x.id,
162 'chal': x.chal.name,
163 'chalid': x.chalid,
164 'team': x.teamid,
165 'value': x.chal.value,
166 'category': x.chal.category,
167 'time': utils.unix_time(x.date)
168 })
169 for award in awards:
170 json_data['solves'].append({
171 'chal': award.name,
172 'chalid': None,
173 'team': award.teamid,
174 'value': award.value,
175 'category': award.category or "Award",
176 'time': utils.unix_time(award.date)
177 })
178 json_data['solves'].sort(key=lambda k: k['time'])
179 return jsonify(json_data)
180
181
182 @admin_teams.route('/admin/fails/all', defaults={'teamid': 'all'}, methods=['GET'])
183 @admin_teams.route('/admin/fails/<int:teamid>', methods=['GET'])
184 @admins_only
185 def admin_fails(teamid):
186 if teamid == "all":
187 fails = WrongKeys.query.join(Teams, WrongKeys.teamid == Teams.id).filter(Teams.banned == False).count()
188 solves = Solves.query.join(Teams, Solves.teamid == Teams.id).filter(Teams.banned == False).count()
189 db.session.close()
190 json_data = {'fails': str(fails), 'solves': str(solves)}
191 return jsonify(json_data)
192 else:
193 fails = WrongKeys.query.filter_by(teamid=teamid).count()
194 solves = Solves.query.filter_by(teamid=teamid).count()
195 db.session.close()
196 json_data = {'fails': str(fails), 'solves': str(solves)}
197 return jsonify(json_data)
198
199
200 @admin_teams.route('/admin/solves/<int:teamid>/<int:chalid>/solve', methods=['POST'])
201 @admins_only
202 def create_solve(teamid, chalid):
203 solve = Solves(chalid=chalid, teamid=teamid, ip='127.0.0.1', flag='MARKED_AS_SOLVED_BY_ADMIN')
204 db.session.add(solve)
205 db.session.commit()
206 db.session.close()
207 return '1'
208
209
210 @admin_teams.route('/admin/solves/<int:keyid>/delete', methods=['POST'])
211 @admins_only
212 def delete_solve(keyid):
213 solve = Solves.query.filter_by(id=keyid).first_or_404()
214 db.session.delete(solve)
215 db.session.commit()
216 db.session.close()
217 return '1'
218
219
220 @admin_teams.route('/admin/wrong_keys/<int:keyid>/delete', methods=['POST'])
221 @admins_only
222 def delete_wrong_key(keyid):
223 wrong_key = WrongKeys.query.filter_by(id=keyid).first_or_404()
224 db.session.delete(wrong_key)
225 db.session.commit()
226 db.session.close()
227 return '1'
228
229
230 @admin_teams.route('/admin/awards/<int:award_id>/delete', methods=['POST'])
231 @admins_only
232 def delete_award(award_id):
233 award = Awards.query.filter_by(id=award_id).first_or_404()
234 db.session.delete(award)
235 db.session.commit()
236 db.session.close()
237 return '1'
238
239
240 @admin_teams.route('/admin/teams/<int:teamid>/awards', methods=['GET'])
241 @admins_only
242 def admin_awards(teamid):
243 awards = Awards.query.filter_by(teamid=teamid).all()
244
245 awards_list = []
246 for award in awards:
247 awards_list.append({
248 'id': award.id,
249 'name': award.name,
250 'description': award.description,
251 'date': award.date,
252 'value': award.value,
253 'category': award.category,
254 'icon': award.icon
255 })
256 json_data = {'awards': awards_list}
257 return jsonify(json_data)
258
259
260 @admin_teams.route('/admin/awards/add', methods=['POST'])
261 @admins_only
262 def create_award():
263 try:
264 teamid = request.form['teamid']
265 name = request.form.get('name', 'Award')
266 value = request.form.get('value', 0)
267 award = Awards(teamid, name, value)
268 award.description = request.form.get('description')
269 award.category = request.form.get('category')
270 db.session.add(award)
271 db.session.commit()
272 db.session.close()
273 return '1'
274 except Exception as e:
275 print(e)
276 return '0'
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py
--- a/CTFd/admin/teams.py
+++ b/CTFd/admin/teams.py
@@ -13,6 +13,27 @@
@admin_teams.route('/admin/teams/<int:page>')
@admins_only
def admin_teams_view(page):
+ q = request.args.get('q')
+ if q:
+ field = request.args.get('field')
+ teams = []
+ errors = []
+ if field == 'id':
+ if q.isnumeric():
+ teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()
+ else:
+ teams = []
+ errors.append('Your ID search term is not numeric')
+ elif field == 'name':
+ teams = Teams.query.filter(Teams.name.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()
+ elif field == 'email':
+ teams = Teams.query.filter(Teams.email.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()
+ elif field == 'affiliation':
+ teams = Teams.query.filter(Teams.affiliation.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()
+ elif field == 'country':
+ teams = Teams.query.filter(Teams.country.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()
+ return render_template('admin/teams.html', teams=teams, pages=None, curr_page=None, q=q, field=field)
+
page = abs(int(page))
results_per_page = 50
page_start = results_per_page * (page - 1)
| {"golden_diff": "diff --git a/CTFd/admin/teams.py b/CTFd/admin/teams.py\n--- a/CTFd/admin/teams.py\n+++ b/CTFd/admin/teams.py\n@@ -13,6 +13,27 @@\n @admin_teams.route('/admin/teams/<int:page>')\n @admins_only\n def admin_teams_view(page):\n+ q = request.args.get('q')\n+ if q:\n+ field = request.args.get('field')\n+ teams = []\n+ errors = []\n+ if field == 'id':\n+ if q.isnumeric():\n+ teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()\n+ else:\n+ teams = []\n+ errors.append('Your ID search term is not numeric')\n+ elif field == 'name':\n+ teams = Teams.query.filter(Teams.name.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n+ elif field == 'email':\n+ teams = Teams.query.filter(Teams.email.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n+ elif field == 'affiliation':\n+ teams = Teams.query.filter(Teams.affiliation.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n+ elif field == 'country':\n+ teams = Teams.query.filter(Teams.country.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n+ return render_template('admin/teams.html', teams=teams, pages=None, curr_page=None, q=q, field=field)\n+\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n", "issue": "Searching teams from the admin panel\n\n", "before_files": [{"content": "from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint\nfrom CTFd.utils import admins_only, is_admin, cache\nfrom CTFd.models import db, Teams, Solves, Awards, Unlocks, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.sql import not_\n\nfrom CTFd import utils\n\nadmin_teams = Blueprint('admin_teams', __name__)\n\n\n@admin_teams.route('/admin/teams', defaults={'page': '1'})\n@admin_teams.route('/admin/teams/<int:page>')\n@admins_only\ndef admin_teams_view(page):\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Teams.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template('admin/teams.html', teams=teams, pages=pages, curr_page=page)\n\n\n@admin_teams.route('/admin/team/<int:teamid>', methods=['GET', 'POST'])\n@admins_only\ndef admin_team(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n\n if request.method == 'GET':\n solves = Solves.query.filter_by(teamid=teamid).all()\n solve_ids = [s.chalid for s in solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n last_seen = db.func.max(Tracking.date).label('last_seen')\n addrs = db.session.query(Tracking.ip, last_seen) \\\n .filter_by(team=teamid) \\\n .group_by(Tracking.ip) \\\n .order_by(last_seen.desc()).all()\n wrong_keys = WrongKeys.query.filter_by(teamid=teamid).order_by(WrongKeys.date.asc()).all()\n awards = Awards.query.filter_by(teamid=teamid).order_by(Awards.date.asc()).all()\n score = user.score(admin=True)\n place = user.place(admin=True)\n return render_template('admin/team.html', solves=solves, team=user, addrs=addrs, score=score, missing=missing,\n place=place, wrong_keys=wrong_keys, awards=awards)\n elif request.method == 'POST':\n admin_user = request.form.get('admin', None)\n if admin_user:\n admin_user = True if admin_user == 'true' else 
False\n user.admin = admin_user\n # Set user.banned to hide admins from scoreboard\n user.banned = admin_user\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n verified = request.form.get('verified', None)\n if verified:\n verified = True if verified == 'true' else False\n user.verified = verified\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n name = request.form.get('name', None)\n password = request.form.get('password', None)\n email = request.form.get('email', None)\n website = request.form.get('website', None)\n affiliation = request.form.get('affiliation', None)\n country = request.form.get('country', None)\n\n errors = []\n\n name_used = Teams.query.filter(Teams.name == name).first()\n if name_used and int(name_used.id) != int(teamid):\n errors.append('That name is taken')\n\n email_used = Teams.query.filter(Teams.email == email).first()\n if email_used and int(email_used.id) != int(teamid):\n errors.append('That email is taken')\n\n if errors:\n db.session.close()\n return jsonify({'data': errors})\n else:\n user.name = name\n if email:\n user.email = email\n if password:\n user.password = bcrypt_sha256.encrypt(password)\n user.website = website\n user.affiliation = affiliation\n user.country = country\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n\n@admin_teams.route('/admin/team/<int:teamid>/mail', methods=['POST'])\n@admins_only\ndef email_user(teamid):\n message = request.form.get('msg', None)\n team = Teams.query.filter(Teams.id == teamid).first()\n if message and team:\n if utils.sendmail(team.email, message):\n return '1'\n return '0'\n\n\n@admin_teams.route('/admin/team/<int:teamid>/ban', methods=['POST'])\n@admins_only\ndef ban(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n user.banned = True\n db.session.commit()\n db.session.close()\n return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))\n\n\n@admin_teams.route('/admin/team/<int:teamid>/unban', methods=['POST'])\n@admins_only\ndef unban(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n user.banned = False\n db.session.commit()\n db.session.close()\n return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))\n\n\n@admin_teams.route('/admin/team/<int:teamid>/delete', methods=['POST'])\n@admins_only\ndef delete_team(teamid):\n try:\n Unlocks.query.filter_by(teamid=teamid).delete()\n Awards.query.filter_by(teamid=teamid).delete()\n WrongKeys.query.filter_by(teamid=teamid).delete()\n Solves.query.filter_by(teamid=teamid).delete()\n Tracking.query.filter_by(team=teamid).delete()\n Teams.query.filter_by(id=teamid).delete()\n db.session.commit()\n db.session.close()\n except DatabaseError:\n return '0'\n else:\n return '1'\n\n\n@admin_teams.route('/admin/solves/<teamid>', methods=['GET'])\n@admins_only\ndef admin_solves(teamid=\"all\"):\n if teamid == \"all\":\n solves = Solves.query.all()\n else:\n solves = Solves.query.filter_by(teamid=teamid).all()\n awards = Awards.query.filter_by(teamid=teamid).all()\n db.session.close()\n json_data = {'solves': []}\n for x in solves:\n json_data['solves'].append({\n 'id': x.id,\n 'chal': x.chal.name,\n 'chalid': x.chalid,\n 'team': x.teamid,\n 'value': x.chal.value,\n 'category': x.chal.category,\n 'time': utils.unix_time(x.date)\n })\n for award in awards:\n json_data['solves'].append({\n 'chal': award.name,\n 'chalid': None,\n 'team': award.teamid,\n 'value': award.value,\n 'category': award.category or 
\"Award\",\n 'time': utils.unix_time(award.date)\n })\n json_data['solves'].sort(key=lambda k: k['time'])\n return jsonify(json_data)\n\n\n@admin_teams.route('/admin/fails/all', defaults={'teamid': 'all'}, methods=['GET'])\n@admin_teams.route('/admin/fails/<int:teamid>', methods=['GET'])\n@admins_only\ndef admin_fails(teamid):\n if teamid == \"all\":\n fails = WrongKeys.query.join(Teams, WrongKeys.teamid == Teams.id).filter(Teams.banned == False).count()\n solves = Solves.query.join(Teams, Solves.teamid == Teams.id).filter(Teams.banned == False).count()\n db.session.close()\n json_data = {'fails': str(fails), 'solves': str(solves)}\n return jsonify(json_data)\n else:\n fails = WrongKeys.query.filter_by(teamid=teamid).count()\n solves = Solves.query.filter_by(teamid=teamid).count()\n db.session.close()\n json_data = {'fails': str(fails), 'solves': str(solves)}\n return jsonify(json_data)\n\n\n@admin_teams.route('/admin/solves/<int:teamid>/<int:chalid>/solve', methods=['POST'])\n@admins_only\ndef create_solve(teamid, chalid):\n solve = Solves(chalid=chalid, teamid=teamid, ip='127.0.0.1', flag='MARKED_AS_SOLVED_BY_ADMIN')\n db.session.add(solve)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/solves/<int:keyid>/delete', methods=['POST'])\n@admins_only\ndef delete_solve(keyid):\n solve = Solves.query.filter_by(id=keyid).first_or_404()\n db.session.delete(solve)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/wrong_keys/<int:keyid>/delete', methods=['POST'])\n@admins_only\ndef delete_wrong_key(keyid):\n wrong_key = WrongKeys.query.filter_by(id=keyid).first_or_404()\n db.session.delete(wrong_key)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/awards/<int:award_id>/delete', methods=['POST'])\n@admins_only\ndef delete_award(award_id):\n award = Awards.query.filter_by(id=award_id).first_or_404()\n db.session.delete(award)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/teams/<int:teamid>/awards', methods=['GET'])\n@admins_only\ndef admin_awards(teamid):\n awards = Awards.query.filter_by(teamid=teamid).all()\n\n awards_list = []\n for award in awards:\n awards_list.append({\n 'id': award.id,\n 'name': award.name,\n 'description': award.description,\n 'date': award.date,\n 'value': award.value,\n 'category': award.category,\n 'icon': award.icon\n })\n json_data = {'awards': awards_list}\n return jsonify(json_data)\n\n\n@admin_teams.route('/admin/awards/add', methods=['POST'])\n@admins_only\ndef create_award():\n try:\n teamid = request.form['teamid']\n name = request.form.get('name', 'Award')\n value = request.form.get('value', 0)\n award = Awards(teamid, name, value)\n award.description = request.form.get('description')\n award.category = request.form.get('category')\n db.session.add(award)\n db.session.commit()\n db.session.close()\n return '1'\n except Exception as e:\n print(e)\n return '0'\n", "path": "CTFd/admin/teams.py"}], "after_files": [{"content": "from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint\nfrom CTFd.utils import admins_only, is_admin, cache\nfrom CTFd.models import db, Teams, Solves, Awards, Unlocks, Containers, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, DatabaseError\nfrom passlib.hash import bcrypt_sha256\nfrom sqlalchemy.sql import not_\n\nfrom CTFd import utils\n\nadmin_teams = Blueprint('admin_teams', 
__name__)\n\n\n@admin_teams.route('/admin/teams', defaults={'page': '1'})\n@admin_teams.route('/admin/teams/<int:page>')\n@admins_only\ndef admin_teams_view(page):\n q = request.args.get('q')\n if q:\n field = request.args.get('field')\n teams = []\n errors = []\n if field == 'id':\n if q.isnumeric():\n teams = Teams.query.filter(Teams.id == q).order_by(Teams.id.asc()).all()\n else:\n teams = []\n errors.append('Your ID search term is not numeric')\n elif field == 'name':\n teams = Teams.query.filter(Teams.name.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n elif field == 'email':\n teams = Teams.query.filter(Teams.email.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n elif field == 'affiliation':\n teams = Teams.query.filter(Teams.affiliation.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n elif field == 'country':\n teams = Teams.query.filter(Teams.country.like('%{}%'.format(q))).order_by(Teams.id.asc()).all()\n return render_template('admin/teams.html', teams=teams, pages=None, curr_page=None, q=q, field=field)\n\n page = abs(int(page))\n results_per_page = 50\n page_start = results_per_page * (page - 1)\n page_end = results_per_page * (page - 1) + results_per_page\n\n teams = Teams.query.order_by(Teams.id.asc()).slice(page_start, page_end).all()\n count = db.session.query(db.func.count(Teams.id)).first()[0]\n pages = int(count / results_per_page) + (count % results_per_page > 0)\n return render_template('admin/teams.html', teams=teams, pages=pages, curr_page=page)\n\n\n@admin_teams.route('/admin/team/<int:teamid>', methods=['GET', 'POST'])\n@admins_only\ndef admin_team(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n\n if request.method == 'GET':\n solves = Solves.query.filter_by(teamid=teamid).all()\n solve_ids = [s.chalid for s in solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n last_seen = db.func.max(Tracking.date).label('last_seen')\n addrs = db.session.query(Tracking.ip, last_seen) \\\n .filter_by(team=teamid) \\\n .group_by(Tracking.ip) \\\n .order_by(last_seen.desc()).all()\n wrong_keys = WrongKeys.query.filter_by(teamid=teamid).order_by(WrongKeys.date.asc()).all()\n awards = Awards.query.filter_by(teamid=teamid).order_by(Awards.date.asc()).all()\n score = user.score(admin=True)\n place = user.place(admin=True)\n return render_template('admin/team.html', solves=solves, team=user, addrs=addrs, score=score, missing=missing,\n place=place, wrong_keys=wrong_keys, awards=awards)\n elif request.method == 'POST':\n admin_user = request.form.get('admin', None)\n if admin_user:\n admin_user = True if admin_user == 'true' else False\n user.admin = admin_user\n # Set user.banned to hide admins from scoreboard\n user.banned = admin_user\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n verified = request.form.get('verified', None)\n if verified:\n verified = True if verified == 'true' else False\n user.verified = verified\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n name = request.form.get('name', None)\n password = request.form.get('password', None)\n email = request.form.get('email', None)\n website = request.form.get('website', None)\n affiliation = request.form.get('affiliation', None)\n country = request.form.get('country', None)\n\n errors = []\n\n name_used = Teams.query.filter(Teams.name == name).first()\n if name_used and int(name_used.id) != int(teamid):\n errors.append('That name is taken')\n\n email_used = 
Teams.query.filter(Teams.email == email).first()\n if email_used and int(email_used.id) != int(teamid):\n errors.append('That email is taken')\n\n if errors:\n db.session.close()\n return jsonify({'data': errors})\n else:\n user.name = name\n if email:\n user.email = email\n if password:\n user.password = bcrypt_sha256.encrypt(password)\n user.website = website\n user.affiliation = affiliation\n user.country = country\n db.session.commit()\n db.session.close()\n return jsonify({'data': ['success']})\n\n\n@admin_teams.route('/admin/team/<int:teamid>/mail', methods=['POST'])\n@admins_only\ndef email_user(teamid):\n message = request.form.get('msg', None)\n team = Teams.query.filter(Teams.id == teamid).first()\n if message and team:\n if utils.sendmail(team.email, message):\n return '1'\n return '0'\n\n\n@admin_teams.route('/admin/team/<int:teamid>/ban', methods=['POST'])\n@admins_only\ndef ban(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n user.banned = True\n db.session.commit()\n db.session.close()\n return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))\n\n\n@admin_teams.route('/admin/team/<int:teamid>/unban', methods=['POST'])\n@admins_only\ndef unban(teamid):\n user = Teams.query.filter_by(id=teamid).first_or_404()\n user.banned = False\n db.session.commit()\n db.session.close()\n return redirect(url_for('admin_scoreboard.admin_scoreboard_view'))\n\n\n@admin_teams.route('/admin/team/<int:teamid>/delete', methods=['POST'])\n@admins_only\ndef delete_team(teamid):\n try:\n Unlocks.query.filter_by(teamid=teamid).delete()\n Awards.query.filter_by(teamid=teamid).delete()\n WrongKeys.query.filter_by(teamid=teamid).delete()\n Solves.query.filter_by(teamid=teamid).delete()\n Tracking.query.filter_by(team=teamid).delete()\n Teams.query.filter_by(id=teamid).delete()\n db.session.commit()\n db.session.close()\n except DatabaseError:\n return '0'\n else:\n return '1'\n\n\n@admin_teams.route('/admin/solves/<teamid>', methods=['GET'])\n@admins_only\ndef admin_solves(teamid=\"all\"):\n if teamid == \"all\":\n solves = Solves.query.all()\n else:\n solves = Solves.query.filter_by(teamid=teamid).all()\n awards = Awards.query.filter_by(teamid=teamid).all()\n db.session.close()\n json_data = {'solves': []}\n for x in solves:\n json_data['solves'].append({\n 'id': x.id,\n 'chal': x.chal.name,\n 'chalid': x.chalid,\n 'team': x.teamid,\n 'value': x.chal.value,\n 'category': x.chal.category,\n 'time': utils.unix_time(x.date)\n })\n for award in awards:\n json_data['solves'].append({\n 'chal': award.name,\n 'chalid': None,\n 'team': award.teamid,\n 'value': award.value,\n 'category': award.category or \"Award\",\n 'time': utils.unix_time(award.date)\n })\n json_data['solves'].sort(key=lambda k: k['time'])\n return jsonify(json_data)\n\n\n@admin_teams.route('/admin/fails/all', defaults={'teamid': 'all'}, methods=['GET'])\n@admin_teams.route('/admin/fails/<int:teamid>', methods=['GET'])\n@admins_only\ndef admin_fails(teamid):\n if teamid == \"all\":\n fails = WrongKeys.query.join(Teams, WrongKeys.teamid == Teams.id).filter(Teams.banned == False).count()\n solves = Solves.query.join(Teams, Solves.teamid == Teams.id).filter(Teams.banned == False).count()\n db.session.close()\n json_data = {'fails': str(fails), 'solves': str(solves)}\n return jsonify(json_data)\n else:\n fails = WrongKeys.query.filter_by(teamid=teamid).count()\n solves = Solves.query.filter_by(teamid=teamid).count()\n db.session.close()\n json_data = {'fails': str(fails), 'solves': str(solves)}\n return 
jsonify(json_data)\n\n\n@admin_teams.route('/admin/solves/<int:teamid>/<int:chalid>/solve', methods=['POST'])\n@admins_only\ndef create_solve(teamid, chalid):\n solve = Solves(chalid=chalid, teamid=teamid, ip='127.0.0.1', flag='MARKED_AS_SOLVED_BY_ADMIN')\n db.session.add(solve)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/solves/<int:keyid>/delete', methods=['POST'])\n@admins_only\ndef delete_solve(keyid):\n solve = Solves.query.filter_by(id=keyid).first_or_404()\n db.session.delete(solve)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/wrong_keys/<int:keyid>/delete', methods=['POST'])\n@admins_only\ndef delete_wrong_key(keyid):\n wrong_key = WrongKeys.query.filter_by(id=keyid).first_or_404()\n db.session.delete(wrong_key)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/awards/<int:award_id>/delete', methods=['POST'])\n@admins_only\ndef delete_award(award_id):\n award = Awards.query.filter_by(id=award_id).first_or_404()\n db.session.delete(award)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_teams.route('/admin/teams/<int:teamid>/awards', methods=['GET'])\n@admins_only\ndef admin_awards(teamid):\n awards = Awards.query.filter_by(teamid=teamid).all()\n\n awards_list = []\n for award in awards:\n awards_list.append({\n 'id': award.id,\n 'name': award.name,\n 'description': award.description,\n 'date': award.date,\n 'value': award.value,\n 'category': award.category,\n 'icon': award.icon\n })\n json_data = {'awards': awards_list}\n return jsonify(json_data)\n\n\n@admin_teams.route('/admin/awards/add', methods=['POST'])\n@admins_only\ndef create_award():\n try:\n teamid = request.form['teamid']\n name = request.form.get('name', 'Award')\n value = request.form.get('value', 0)\n award = Awards(teamid, name, value)\n award.description = request.form.get('description')\n award.category = request.form.get('category')\n db.session.add(award)\n db.session.commit()\n db.session.close()\n return '1'\n except Exception as e:\n print(e)\n return '0'\n", "path": "CTFd/admin/teams.py"}]} | 3,406 | 383 |
gh_patches_debug_9811 | rasdani/github-patches | git_diff | svthalia__concrexit-1966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Saving food event with non-member registrations crashes trying to send push notification
Sentry Issue: [CONCREXIT-8P](https://sentry.io/organizations/thalia/issues/2613494929/?referrer=github_integration)
```
NotNullViolation: null value in column "user_id" of relation "pushnotifications_message_users" violates not-null constraint
DETAIL: Failing row contains (549181, 1156, null).
File "django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
IntegrityError: null value in column "user_id" of relation "pushnotifications_message_users" violates not-null constraint
DETAIL: Failing row contains (549181, 1156, null).
(26 additional frame(s) were not displayed)
...
File "django/db/backends/utils.py", line 66, in execute
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
File "django/db/backends/utils.py", line 75, in _execute_with_wrappers
return executor(sql, params, many, context)
File "django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
File "django/db/utils.py", line 90, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
```
--- END ISSUE ---
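As an aside, the traceback boils down to a many-to-many `users.set(...)` receiving a `None` member id from a name-only (non-member) registration. A minimal plain-Python sketch of that failure mode and of the filtering that avoids it — hypothetical data, no Django required:

```python
# Illustrative sketch only: mimics how the reminder's recipient list is built
# from event registrations. Non-member registrations carry member=None, and an
# unfiltered list passes that None on to users.set(), which the database
# rejects with the NOT NULL violation shown in the Sentry report.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Registration:
    member: Optional[int]  # member id, or None for a name-only registration

registrations = [Registration(101), Registration(None), Registration(102)]

unfiltered = [r.member for r in registrations]
print(unfiltered)  # [101, None, 102] -> would crash users.set()

filtered = [r.member for r in registrations if r.member is not None]
print(filtered)    # [101, 102]       -> mirrors a .filter(member__isnull=False) queryset
```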
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pizzas/models.py`
Content:
```
1 """The models defined by the pizzas package."""
2 from django.core.exceptions import ValidationError, ObjectDoesNotExist
3 from django.db import models
4 from django.db.models import Q
5 from django.utils import timezone
6 from django.utils.translation import gettext_lazy as _
7
8 from events.models import Event
9 import members
10 from members.models import Member
11 from payments.models import Payment
12 from payments.services import delete_payment
13 from pushnotifications.models import ScheduledMessage, Category
14
15
16 class CurrentEventManager(models.Manager):
17 """Only shows available products."""
18
19 def get_queryset(self):
20 return (
21 super()
22 .get_queryset()
23 .filter(
24 end__gt=timezone.now() - timezone.timedelta(hours=8),
25 start__lte=timezone.now() + timezone.timedelta(hours=8),
26 )
27 )
28
29
30 class FoodEvent(models.Model):
31 """Describes an event where food can be ordered."""
32
33 objects = models.Manager()
34 current_objects = CurrentEventManager()
35
36 start = models.DateTimeField(_("Order from"))
37 end = models.DateTimeField(_("Order until"))
38 event = models.OneToOneField(
39 Event, on_delete=models.CASCADE, related_name="food_event"
40 )
41
42 send_notification = models.BooleanField(
43 _("Send an order notification"), default=True
44 )
45 end_reminder = models.OneToOneField(ScheduledMessage, models.CASCADE, null=True)
46
47 tpay_allowed = models.BooleanField(_("Allow Thalia Pay"), default=True)
48
49 @property
50 def title(self):
51 return self.event.title
52
53 @property
54 def in_the_future(self):
55 return self.start > timezone.now()
56
57 @property
58 def has_ended(self):
59 return self.end < timezone.now()
60
61 @property
62 def just_ended(self):
63 return (
64 self.has_ended and self.end + timezone.timedelta(hours=8) > timezone.now()
65 )
66
67 @classmethod
68 def current(cls):
69 """Get the currently relevant pizza event: the first one that starts within 8 hours from now."""
70 try:
71 events = FoodEvent.current_objects.order_by("start")
72 if events.count() > 1:
73 return events.exclude(end__lt=timezone.now()).first()
74 return events.get()
75 except FoodEvent.DoesNotExist:
76 return None
77
78 def __init__(self, *args, **kwargs):
79 super().__init__(*args, **kwargs)
80 self._end = self.end
81
82 def validate_unique(self, exclude=None):
83 super().validate_unique(exclude)
84 for other in FoodEvent.objects.filter(
85 Q(end__gte=self.start, end__lte=self.end)
86 | Q(start=self.start, start__lte=self.start)
87 ):
88 if other.pk == self.pk:
89 continue
90 raise ValidationError(
91 {
92 "start": _("This event cannot overlap with {}.").format(other),
93 "end": _("This event cannot overlap with {}.").format(other),
94 }
95 )
96
97 def clean(self):
98 super().clean()
99
100 if self.start >= self.end:
101 raise ValidationError(
102 {
103 "start": _("The start is after the end of this event."),
104 "end": _("The end is before the start of this event."),
105 }
106 )
107
108 def save(self, **kwargs):
109 if self.send_notification and not self.end_reminder:
110 end_reminder = ScheduledMessage()
111 end_reminder.title = f"{self.event.title}: Order food"
112 end_reminder.body = "You can order food for 10 more minutes"
113 end_reminder.category = Category.objects.get(key=Category.PIZZA)
114 end_reminder.time = self.end - timezone.timedelta(minutes=10)
115 end_reminder.save()
116
117 if self.event.registration_required:
118 end_reminder.users.set(
119 self.event.registrations.select_related("member").values_list(
120 "member", flat=True
121 )
122 )
123 else:
124 end_reminder.users.set(Member.current_members.all())
125
126 self.end_reminder = end_reminder
127 elif self.send_notification and self.end_reminder and self._end != self.end:
128 self.end_reminder.time = self.end
129 self.end_reminder.save()
130 elif not self.send_notification and self.end_reminder:
131 end_reminder = self.end_reminder
132 self.end_reminder = None
133 if not end_reminder.sent:
134 end_reminder.delete()
135
136 super().save(**kwargs)
137
138 def delete(self, using=None, keep_parents=False):
139 if self.end_reminder is not None and not self.end_reminder.sent:
140 self.end_reminder.delete()
141 return super().delete(using, keep_parents)
142
143 def __str__(self):
144 return "Food for " + str(self.event)
145
146 class Meta:
147 ordering = ("-start",)
148
149
150 class AvailableProductManager(models.Manager):
151 """Only shows available products."""
152
153 def get_queryset(self):
154 return super().get_queryset().filter(available=True)
155
156
157 class Product(models.Model):
158 """Describes a product."""
159
160 objects = models.Manager()
161 available_products = AvailableProductManager()
162
163 name = models.CharField(max_length=50)
164 description = models.TextField()
165 price = models.DecimalField(max_digits=5, decimal_places=2)
166 available = models.BooleanField(default=True)
167 restricted = models.BooleanField(
168 default=False,
169 help_text=_(
170 "Only allow to be ordered by people with the "
171 "'order restricted products' permission."
172 ),
173 )
174
175 def __str__(self):
176 return self.name
177
178 class Meta:
179 ordering = ("name",)
180 permissions = (("order_restricted_products", _("Order restricted products")),)
181
182
183 class FoodOrder(models.Model):
184 """Describes an order of an item during a food event."""
185
186 member = models.ForeignKey(
187 members.models.Member, on_delete=models.CASCADE, blank=True, null=True,
188 )
189
190 name = models.CharField(
191 verbose_name=_("name"),
192 max_length=50,
193 help_text=_("Use this for non-members"),
194 null=True,
195 blank=True,
196 )
197
198 payment = models.OneToOneField(
199 verbose_name=_("payment"),
200 to="payments.Payment",
201 related_name="food_order",
202 on_delete=models.SET_NULL,
203 blank=True,
204 null=True,
205 )
206
207 product = models.ForeignKey(
208 verbose_name=_("product"), to=Product, on_delete=models.PROTECT,
209 )
210
211 food_event = models.ForeignKey(
212 verbose_name=_("event"),
213 to=FoodEvent,
214 on_delete=models.CASCADE,
215 related_name="orders",
216 )
217
218 def clean(self):
219 if (self.member is None and not self.name) or (self.member and self.name):
220 raise ValidationError(
221 {
222 "member": _("Either specify a member or a name"),
223 "name": _("Either specify a member or a name"),
224 }
225 )
226
227 @property
228 def member_name(self):
229 if self.member is not None:
230 return self.member.get_full_name()
231 return self.name
232
233 @property
234 def member_last_name(self):
235 if self.member is not None:
236 return self.member.last_name
237 return " ".join(self.name.split(" ")[1:])
238
239 @property
240 def member_first_name(self):
241 if self.member is not None:
242 return self.member.first_name
243 return self.name.strip(" ").split(" ")[0]
244
245 @property
246 def can_be_changed(self):
247 try:
248 return (
249 not self.payment or self.payment.type == Payment.TPAY
250 ) and not self.food_event.has_ended
251 except ObjectDoesNotExist:
252 return False
253
254 def delete(self, using=None, keep_parents=False):
255 if self.payment is not None and self.can_be_changed:
256 delete_payment(self)
257 return super().delete(using, keep_parents)
258
259 class Meta:
260 unique_together = (
261 "food_event",
262 "member",
263 )
264
265 def __str__(self):
266 return _("Food order by {member_name}: {product}").format(
267 member_name=self.member_name, product=self.product
268 )
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/pizzas/models.py b/website/pizzas/models.py
--- a/website/pizzas/models.py
+++ b/website/pizzas/models.py
@@ -116,9 +116,9 @@
if self.event.registration_required:
end_reminder.users.set(
- self.event.registrations.select_related("member").values_list(
- "member", flat=True
- )
+ self.event.registrations.filter(member__isnull=False)
+ .select_related("member")
+ .values_list("member", flat=True)
)
else:
end_reminder.users.set(Member.current_members.all())
| {"golden_diff": "diff --git a/website/pizzas/models.py b/website/pizzas/models.py\n--- a/website/pizzas/models.py\n+++ b/website/pizzas/models.py\n@@ -116,9 +116,9 @@\n \n if self.event.registration_required:\n end_reminder.users.set(\n- self.event.registrations.select_related(\"member\").values_list(\n- \"member\", flat=True\n- )\n+ self.event.registrations.filter(member__isnull=False)\n+ .select_related(\"member\")\n+ .values_list(\"member\", flat=True)\n )\n else:\n end_reminder.users.set(Member.current_members.all())\n", "issue": "Saving food event with non-member registrations crashes trying to send push notification\n\nSentry Issue: [CONCREXIT-8P](https://sentry.io/organizations/thalia/issues/2613494929/?referrer=github_integration)\n\n```\nNotNullViolation: null value in column \"user_id\" of relation \"pushnotifications_message_users\" violates not-null constraint\nDETAIL: Failing row contains (549181, 1156, null).\n\n File \"django/db/backends/utils.py\", line 84, in _execute\n return self.cursor.execute(sql, params)\n\nIntegrityError: null value in column \"user_id\" of relation \"pushnotifications_message_users\" violates not-null constraint\nDETAIL: Failing row contains (549181, 1156, null).\n\n(26 additional frame(s) were not displayed)\n...\n File \"django/db/backends/utils.py\", line 66, in execute\n return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\n File \"django/db/backends/utils.py\", line 75, in _execute_with_wrappers\n return executor(sql, params, many, context)\n File \"django/db/backends/utils.py\", line 84, in _execute\n return self.cursor.execute(sql, params)\n File \"django/db/utils.py\", line 90, in __exit__\n raise dj_exc_value.with_traceback(traceback) from exc_value\n File \"django/db/backends/utils.py\", line 84, in _execute\n return self.cursor.execute(sql, params)\n```\n", "before_files": [{"content": "\"\"\"The models defined by the pizzas package.\"\"\"\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events.models import Event\nimport members\nfrom members.models import Member\nfrom payments.models import Payment\nfrom payments.services import delete_payment\nfrom pushnotifications.models import ScheduledMessage, Category\n\n\nclass CurrentEventManager(models.Manager):\n \"\"\"Only shows available products.\"\"\"\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n end__gt=timezone.now() - timezone.timedelta(hours=8),\n start__lte=timezone.now() + timezone.timedelta(hours=8),\n )\n )\n\n\nclass FoodEvent(models.Model):\n \"\"\"Describes an event where food can be ordered.\"\"\"\n\n objects = models.Manager()\n current_objects = CurrentEventManager()\n\n start = models.DateTimeField(_(\"Order from\"))\n end = models.DateTimeField(_(\"Order until\"))\n event = models.OneToOneField(\n Event, on_delete=models.CASCADE, related_name=\"food_event\"\n )\n\n send_notification = models.BooleanField(\n _(\"Send an order notification\"), default=True\n )\n end_reminder = models.OneToOneField(ScheduledMessage, models.CASCADE, null=True)\n\n tpay_allowed = models.BooleanField(_(\"Allow Thalia Pay\"), default=True)\n\n @property\n def title(self):\n return self.event.title\n\n @property\n def in_the_future(self):\n return self.start > timezone.now()\n\n @property\n def has_ended(self):\n return self.end < timezone.now()\n\n 
@property\n def just_ended(self):\n return (\n self.has_ended and self.end + timezone.timedelta(hours=8) > timezone.now()\n )\n\n @classmethod\n def current(cls):\n \"\"\"Get the currently relevant pizza event: the first one that starts within 8 hours from now.\"\"\"\n try:\n events = FoodEvent.current_objects.order_by(\"start\")\n if events.count() > 1:\n return events.exclude(end__lt=timezone.now()).first()\n return events.get()\n except FoodEvent.DoesNotExist:\n return None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._end = self.end\n\n def validate_unique(self, exclude=None):\n super().validate_unique(exclude)\n for other in FoodEvent.objects.filter(\n Q(end__gte=self.start, end__lte=self.end)\n | Q(start=self.start, start__lte=self.start)\n ):\n if other.pk == self.pk:\n continue\n raise ValidationError(\n {\n \"start\": _(\"This event cannot overlap with {}.\").format(other),\n \"end\": _(\"This event cannot overlap with {}.\").format(other),\n }\n )\n\n def clean(self):\n super().clean()\n\n if self.start >= self.end:\n raise ValidationError(\n {\n \"start\": _(\"The start is after the end of this event.\"),\n \"end\": _(\"The end is before the start of this event.\"),\n }\n )\n\n def save(self, **kwargs):\n if self.send_notification and not self.end_reminder:\n end_reminder = ScheduledMessage()\n end_reminder.title = f\"{self.event.title}: Order food\"\n end_reminder.body = \"You can order food for 10 more minutes\"\n end_reminder.category = Category.objects.get(key=Category.PIZZA)\n end_reminder.time = self.end - timezone.timedelta(minutes=10)\n end_reminder.save()\n\n if self.event.registration_required:\n end_reminder.users.set(\n self.event.registrations.select_related(\"member\").values_list(\n \"member\", flat=True\n )\n )\n else:\n end_reminder.users.set(Member.current_members.all())\n\n self.end_reminder = end_reminder\n elif self.send_notification and self.end_reminder and self._end != self.end:\n self.end_reminder.time = self.end\n self.end_reminder.save()\n elif not self.send_notification and self.end_reminder:\n end_reminder = self.end_reminder\n self.end_reminder = None\n if not end_reminder.sent:\n end_reminder.delete()\n\n super().save(**kwargs)\n\n def delete(self, using=None, keep_parents=False):\n if self.end_reminder is not None and not self.end_reminder.sent:\n self.end_reminder.delete()\n return super().delete(using, keep_parents)\n\n def __str__(self):\n return \"Food for \" + str(self.event)\n\n class Meta:\n ordering = (\"-start\",)\n\n\nclass AvailableProductManager(models.Manager):\n \"\"\"Only shows available products.\"\"\"\n\n def get_queryset(self):\n return super().get_queryset().filter(available=True)\n\n\nclass Product(models.Model):\n \"\"\"Describes a product.\"\"\"\n\n objects = models.Manager()\n available_products = AvailableProductManager()\n\n name = models.CharField(max_length=50)\n description = models.TextField()\n price = models.DecimalField(max_digits=5, decimal_places=2)\n available = models.BooleanField(default=True)\n restricted = models.BooleanField(\n default=False,\n help_text=_(\n \"Only allow to be ordered by people with the \"\n \"'order restricted products' permission.\"\n ),\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = (\"name\",)\n permissions = ((\"order_restricted_products\", _(\"Order restricted products\")),)\n\n\nclass FoodOrder(models.Model):\n \"\"\"Describes an order of an item during a food event.\"\"\"\n\n member = models.ForeignKey(\n 
members.models.Member, on_delete=models.CASCADE, blank=True, null=True,\n )\n\n name = models.CharField(\n verbose_name=_(\"name\"),\n max_length=50,\n help_text=_(\"Use this for non-members\"),\n null=True,\n blank=True,\n )\n\n payment = models.OneToOneField(\n verbose_name=_(\"payment\"),\n to=\"payments.Payment\",\n related_name=\"food_order\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n product = models.ForeignKey(\n verbose_name=_(\"product\"), to=Product, on_delete=models.PROTECT,\n )\n\n food_event = models.ForeignKey(\n verbose_name=_(\"event\"),\n to=FoodEvent,\n on_delete=models.CASCADE,\n related_name=\"orders\",\n )\n\n def clean(self):\n if (self.member is None and not self.name) or (self.member and self.name):\n raise ValidationError(\n {\n \"member\": _(\"Either specify a member or a name\"),\n \"name\": _(\"Either specify a member or a name\"),\n }\n )\n\n @property\n def member_name(self):\n if self.member is not None:\n return self.member.get_full_name()\n return self.name\n\n @property\n def member_last_name(self):\n if self.member is not None:\n return self.member.last_name\n return \" \".join(self.name.split(\" \")[1:])\n\n @property\n def member_first_name(self):\n if self.member is not None:\n return self.member.first_name\n return self.name.strip(\" \").split(\" \")[0]\n\n @property\n def can_be_changed(self):\n try:\n return (\n not self.payment or self.payment.type == Payment.TPAY\n ) and not self.food_event.has_ended\n except ObjectDoesNotExist:\n return False\n\n def delete(self, using=None, keep_parents=False):\n if self.payment is not None and self.can_be_changed:\n delete_payment(self)\n return super().delete(using, keep_parents)\n\n class Meta:\n unique_together = (\n \"food_event\",\n \"member\",\n )\n\n def __str__(self):\n return _(\"Food order by {member_name}: {product}\").format(\n member_name=self.member_name, product=self.product\n )\n", "path": "website/pizzas/models.py"}], "after_files": [{"content": "\"\"\"The models defined by the pizzas package.\"\"\"\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events.models import Event\nimport members\nfrom members.models import Member\nfrom payments.models import Payment\nfrom payments.services import delete_payment\nfrom pushnotifications.models import ScheduledMessage, Category\n\n\nclass CurrentEventManager(models.Manager):\n \"\"\"Only shows available products.\"\"\"\n\n def get_queryset(self):\n return (\n super()\n .get_queryset()\n .filter(\n end__gt=timezone.now() - timezone.timedelta(hours=8),\n start__lte=timezone.now() + timezone.timedelta(hours=8),\n )\n )\n\n\nclass FoodEvent(models.Model):\n \"\"\"Describes an event where food can be ordered.\"\"\"\n\n objects = models.Manager()\n current_objects = CurrentEventManager()\n\n start = models.DateTimeField(_(\"Order from\"))\n end = models.DateTimeField(_(\"Order until\"))\n event = models.OneToOneField(\n Event, on_delete=models.CASCADE, related_name=\"food_event\"\n )\n\n send_notification = models.BooleanField(\n _(\"Send an order notification\"), default=True\n )\n end_reminder = models.OneToOneField(ScheduledMessage, models.CASCADE, null=True)\n\n tpay_allowed = models.BooleanField(_(\"Allow Thalia Pay\"), default=True)\n\n @property\n def title(self):\n return self.event.title\n\n @property\n def in_the_future(self):\n return self.start 
> timezone.now()\n\n @property\n def has_ended(self):\n return self.end < timezone.now()\n\n @property\n def just_ended(self):\n return (\n self.has_ended and self.end + timezone.timedelta(hours=8) > timezone.now()\n )\n\n @classmethod\n def current(cls):\n \"\"\"Get the currently relevant pizza event: the first one that starts within 8 hours from now.\"\"\"\n try:\n events = FoodEvent.current_objects.order_by(\"start\")\n if events.count() > 1:\n return events.exclude(end__lt=timezone.now()).first()\n return events.get()\n except FoodEvent.DoesNotExist:\n return None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._end = self.end\n\n def validate_unique(self, exclude=None):\n super().validate_unique(exclude)\n for other in FoodEvent.objects.filter(\n Q(end__gte=self.start, end__lte=self.end)\n | Q(start=self.start, start__lte=self.start)\n ):\n if other.pk == self.pk:\n continue\n raise ValidationError(\n {\n \"start\": _(\"This event cannot overlap with {}.\").format(other),\n \"end\": _(\"This event cannot overlap with {}.\").format(other),\n }\n )\n\n def clean(self):\n super().clean()\n\n if self.start >= self.end:\n raise ValidationError(\n {\n \"start\": _(\"The start is after the end of this event.\"),\n \"end\": _(\"The end is before the start of this event.\"),\n }\n )\n\n def save(self, **kwargs):\n if self.send_notification and not self.end_reminder:\n end_reminder = ScheduledMessage()\n end_reminder.title = f\"{self.event.title}: Order food\"\n end_reminder.body = \"You can order food for 10 more minutes\"\n end_reminder.category = Category.objects.get(key=Category.PIZZA)\n end_reminder.time = self.end - timezone.timedelta(minutes=10)\n end_reminder.save()\n\n if self.event.registration_required:\n end_reminder.users.set(\n self.event.registrations.filter(member__isnull=False)\n .select_related(\"member\")\n .values_list(\"member\", flat=True)\n )\n else:\n end_reminder.users.set(Member.current_members.all())\n\n self.end_reminder = end_reminder\n elif self.send_notification and self.end_reminder and self._end != self.end:\n self.end_reminder.time = self.end\n self.end_reminder.save()\n elif not self.send_notification and self.end_reminder:\n end_reminder = self.end_reminder\n self.end_reminder = None\n if not end_reminder.sent:\n end_reminder.delete()\n\n super().save(**kwargs)\n\n def delete(self, using=None, keep_parents=False):\n if self.end_reminder is not None and not self.end_reminder.sent:\n self.end_reminder.delete()\n return super().delete(using, keep_parents)\n\n def __str__(self):\n return \"Food for \" + str(self.event)\n\n class Meta:\n ordering = (\"-start\",)\n\n\nclass AvailableProductManager(models.Manager):\n \"\"\"Only shows available products.\"\"\"\n\n def get_queryset(self):\n return super().get_queryset().filter(available=True)\n\n\nclass Product(models.Model):\n \"\"\"Describes a product.\"\"\"\n\n objects = models.Manager()\n available_products = AvailableProductManager()\n\n name = models.CharField(max_length=50)\n description = models.TextField()\n price = models.DecimalField(max_digits=5, decimal_places=2)\n available = models.BooleanField(default=True)\n restricted = models.BooleanField(\n default=False,\n help_text=_(\n \"Only allow to be ordered by people with the \"\n \"'order restricted products' permission.\"\n ),\n )\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = (\"name\",)\n permissions = ((\"order_restricted_products\", _(\"Order restricted products\")),)\n\n\nclass 
FoodOrder(models.Model):\n \"\"\"Describes an order of an item during a food event.\"\"\"\n\n member = models.ForeignKey(\n members.models.Member, on_delete=models.CASCADE, blank=True, null=True,\n )\n\n name = models.CharField(\n verbose_name=_(\"name\"),\n max_length=50,\n help_text=_(\"Use this for non-members\"),\n null=True,\n blank=True,\n )\n\n payment = models.OneToOneField(\n verbose_name=_(\"payment\"),\n to=\"payments.Payment\",\n related_name=\"food_order\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n product = models.ForeignKey(\n verbose_name=_(\"product\"), to=Product, on_delete=models.PROTECT,\n )\n\n food_event = models.ForeignKey(\n verbose_name=_(\"event\"),\n to=FoodEvent,\n on_delete=models.CASCADE,\n related_name=\"orders\",\n )\n\n def clean(self):\n if (self.member is None and not self.name) or (self.member and self.name):\n raise ValidationError(\n {\n \"member\": _(\"Either specify a member or a name\"),\n \"name\": _(\"Either specify a member or a name\"),\n }\n )\n\n @property\n def member_name(self):\n if self.member is not None:\n return self.member.get_full_name()\n return self.name\n\n @property\n def member_last_name(self):\n if self.member is not None:\n return self.member.last_name\n return \" \".join(self.name.split(\" \")[1:])\n\n @property\n def member_first_name(self):\n if self.member is not None:\n return self.member.first_name\n return self.name.strip(\" \").split(\" \")[0]\n\n @property\n def can_be_changed(self):\n try:\n return (\n not self.payment or self.payment.type == Payment.TPAY\n ) and not self.food_event.has_ended\n except ObjectDoesNotExist:\n return False\n\n def delete(self, using=None, keep_parents=False):\n if self.payment is not None and self.can_be_changed:\n delete_payment(self)\n return super().delete(using, keep_parents)\n\n class Meta:\n unique_together = (\n \"food_event\",\n \"member\",\n )\n\n def __str__(self):\n return _(\"Food order by {member_name}: {product}\").format(\n member_name=self.member_name, product=self.product\n )\n", "path": "website/pizzas/models.py"}]} | 3,001 | 140 |
gh_patches_debug_63143 | rasdani/github-patches | git_diff | frappe__frappe-12805 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It is not possible to return 404 from portal pages
<!--
Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to Frappe
- For questions and general support, use https://stackoverflow.com/questions/tagged/frappe
- For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR 😉
-->
## Description of the issue
There is no way to return 404 from the portal pages.
When enquired about it in the _Frappe Framework Public_ Telegram group, @netchampfaris suggested to `raise frappe.PageDoesNotExistError`. But that didn't work either.
It looks like the [render.py][1] is handling the `PageDoesNotExistError`, but the [context.py][2] is handling only DoesNotExistError. Because of this, raising none of those exceptions work.
[1]: https://github.com/frappe/frappe/blob/develop/frappe/website/render.py#L50
[2]: https://github.com/frappe/frappe/blob/develop/frappe/website/context.py#L61
## Context information (for bug reports)
**Output of `bench version`**
```
(paste here)
```
## Steps to reproduce the issue
1. Create a portal page with `raise frappe.PageDoesNotExist()` in the `get_context` function
2. Open that portal page in the browser
3. You'll see a portal page (or 500 if a variable that is defined after the raise statement is used) instead of 404
### Observed result
a portal page with 200 OK (or 500 if a variable that is defined after the raise statement is used)
### Expected result
404 Not Found
### Stacktrace / full error message
Not applicable.
## Additional information
OS version / distribution: Mac OS X 11.2.3
```
$ bench version
community 0.0.1
frappe 13.x.x-develop
```
--- END ISSUE ---
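For reference, the pattern the reporter describes looks like the following hypothetical portal page controller (an assumed `www/secret_page.py` in a custom app; the doctype and parameter names are placeholders), where raising `frappe.PageDoesNotExistError` from `get_context` is expected to produce a 404:

```python
# Hypothetical www/secret_page.py -- a sketch of the scenario in the issue,
# not code from the Frappe codebase.
import frappe


def get_context(context):
    name = frappe.form_dict.get("name")
    if not name or not frappe.db.exists("ToDo", name):
        # Expected to bubble up and be rendered as a 404 page
        raise frappe.PageDoesNotExistError
    context.doc = frappe.get_doc("ToDo", name)
    return context
```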
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/website/context.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # MIT License. See license.txt
3
4 from __future__ import unicode_literals
5 import frappe, os, json
6
7 from frappe.website.doctype.website_settings.website_settings import get_website_settings
8 from frappe.website.router import get_page_context
9 from frappe.model.document import Document
10
11 def get_context(path, args=None):
12 if args and args.source:
13 context = args
14 else:
15 context = get_page_context(path)
16 if args:
17 context.update(args)
18
19 if hasattr(frappe.local, 'request'):
20 # for <body data-path=""> (remove leading slash)
21 # path could be overriden in render.resolve_from_map
22 context["path"] = frappe.local.request.path.strip('/ ')
23 else:
24 context["path"] = path
25
26 context.canonical = frappe.utils.get_url(frappe.utils.escape_html(context.path))
27 context.route = context.path
28 context = build_context(context)
29
30 # set using frappe.respond_as_web_page
31 if hasattr(frappe.local, 'response') and frappe.local.response.get('context'):
32 context.update(frappe.local.response.context)
33
34 # to be able to inspect the context dict
35 # Use the macro "inspect" from macros.html
36 context._context_dict = context
37
38 context.developer_mode = frappe.conf.developer_mode
39
40 return context
41
42 def update_controller_context(context, controller):
43 module = frappe.get_module(controller)
44
45 if module:
46 # get config fields
47 for prop in ("base_template_path", "template", "no_cache", "sitemap",
48 "condition_field"):
49 if hasattr(module, prop):
50 context[prop] = getattr(module, prop)
51
52 if hasattr(module, "get_context"):
53 import inspect
54 try:
55 if inspect.getargspec(module.get_context).args:
56 ret = module.get_context(context)
57 else:
58 ret = module.get_context()
59 if ret:
60 context.update(ret)
61 except (frappe.PermissionError, frappe.DoesNotExistError, frappe.Redirect):
62 raise
63 except:
64 if not frappe.flags.in_migrate:
65 frappe.errprint(frappe.utils.get_traceback())
66
67 if hasattr(module, "get_children"):
68 context.children = module.get_children(context)
69
70
71 def build_context(context):
72 """get_context method of doc or module is supposed to render
73 content templates and push it into context"""
74 context = frappe._dict(context)
75
76 if not "url_prefix" in context:
77 context.url_prefix = ""
78
79 if context.url_prefix and context.url_prefix[-1]!='/':
80 context.url_prefix += '/'
81
82 # for backward compatibility
83 context.docs_base_url = '/docs'
84
85 context.update(get_website_settings(context))
86 context.update(frappe.local.conf.get("website_context") or {})
87
88 # provide doc
89 if context.doc:
90 context.update(context.doc.as_dict())
91 context.update(context.doc.get_website_properties())
92
93 if not context.template:
94 context.template = context.doc.meta.get_web_template()
95
96 if hasattr(context.doc, "get_context"):
97 ret = context.doc.get_context(context)
98
99 if ret:
100 context.update(ret)
101
102 for prop in ("no_cache", "sitemap"):
103 if not prop in context:
104 context[prop] = getattr(context.doc, prop, False)
105
106 elif context.controller:
107 # controller based context
108 update_controller_context(context, context.controller)
109
110 # controller context extensions
111 context_controller_hooks = frappe.get_hooks("extend_website_page_controller_context") or {}
112 for controller, extension in context_controller_hooks.items():
113 if isinstance(extension, list):
114 for ext in extension:
115 if controller == context.controller:
116 update_controller_context(context, ext)
117 else:
118 update_controller_context(context, extension)
119
120 add_metatags(context)
121 add_sidebar_and_breadcrumbs(context)
122
123 # determine templates to be used
124 if not context.base_template_path:
125 app_base = frappe.get_hooks("base_template")
126 context.base_template_path = app_base[-1] if app_base else "templates/base.html"
127
128 if context.title_prefix and context.title and not context.title.startswith(context.title_prefix):
129 context.title = '{0} - {1}'.format(context.title_prefix, context.title)
130
131 # apply context from hooks
132 update_website_context = frappe.get_hooks('update_website_context')
133 for method in update_website_context:
134 values = frappe.get_attr(method)(context)
135 if values:
136 context.update(values)
137
138 return context
139
140 def load_sidebar(context, sidebar_json_path):
141 with open(sidebar_json_path, 'r') as sidebarfile:
142 try:
143 sidebar_json = sidebarfile.read()
144 context.sidebar_items = json.loads(sidebar_json)
145 context.show_sidebar = 1
146 except json.decoder.JSONDecodeError:
147 frappe.throw('Invalid Sidebar JSON at ' + sidebar_json_path)
148
149 def get_sidebar_json_path(path, look_for=False):
150 '''
151 Get _sidebar.json path from directory path
152
153 :param path: path of the current diretory
154 :param look_for: if True, look for _sidebar.json going upwards from given path
155
156 :return: _sidebar.json path
157 '''
158 if os.path.split(path)[1] == 'www' or path == '/' or not path:
159 return ''
160
161 sidebar_json_path = os.path.join(path, '_sidebar.json')
162 if os.path.exists(sidebar_json_path):
163 return sidebar_json_path
164 else:
165 if look_for:
166 return get_sidebar_json_path(os.path.split(path)[0], look_for)
167 else:
168 return ''
169
170 def add_sidebar_and_breadcrumbs(context):
171 '''Add sidebar and breadcrumbs to context'''
172 from frappe.website.router import get_page_info_from_template
173 if context.show_sidebar:
174 context.no_cache = 1
175 add_sidebar_data(context)
176 else:
177 if context.basepath:
178 hooks = frappe.get_hooks('look_for_sidebar_json')
179 look_for_sidebar_json = hooks[0] if hooks else 0
180 sidebar_json_path = get_sidebar_json_path(
181 context.basepath,
182 look_for_sidebar_json
183 )
184 if sidebar_json_path:
185 load_sidebar(context, sidebar_json_path)
186
187 if context.add_breadcrumbs and not context.parents:
188 if context.basepath:
189 parent_path = os.path.dirname(context.path).rstrip('/')
190 page_info = get_page_info_from_template(parent_path)
191 if page_info:
192 context.parents = [dict(route=parent_path, title=page_info.title)]
193
194 def add_sidebar_data(context):
195 from frappe.utils.user import get_fullname_and_avatar
196 import frappe.www.list
197
198 if context.show_sidebar and context.website_sidebar:
199 context.sidebar_items = frappe.get_all('Website Sidebar Item',
200 filters=dict(parent=context.website_sidebar), fields=['title', 'route', '`group`'],
201 order_by='idx asc')
202
203 if not context.sidebar_items:
204 sidebar_items = frappe.cache().hget('portal_menu_items', frappe.session.user)
205 if sidebar_items == None:
206 sidebar_items = []
207 roles = frappe.get_roles()
208 portal_settings = frappe.get_doc('Portal Settings', 'Portal Settings')
209
210 def add_items(sidebar_items, items):
211 for d in items:
212 if d.get('enabled') and ((not d.get('role')) or d.get('role') in roles):
213 sidebar_items.append(d.as_dict() if isinstance(d, Document) else d)
214
215 if not portal_settings.hide_standard_menu:
216 add_items(sidebar_items, portal_settings.get('menu'))
217
218 if portal_settings.custom_menu:
219 add_items(sidebar_items, portal_settings.get('custom_menu'))
220
221 items_via_hooks = frappe.get_hooks('portal_menu_items')
222 if items_via_hooks:
223 for i in items_via_hooks: i['enabled'] = 1
224 add_items(sidebar_items, items_via_hooks)
225
226 frappe.cache().hset('portal_menu_items', frappe.session.user, sidebar_items)
227
228 context.sidebar_items = sidebar_items
229
230 info = get_fullname_and_avatar(frappe.session.user)
231 context["fullname"] = info.fullname
232 context["user_image"] = info.avatar
233 context["user"] = info.name
234
235
236 def add_metatags(context):
237 tags = frappe._dict(context.get("metatags") or {})
238
239 if "og:type" not in tags:
240 tags["og:type"] = "article"
241
242 if "title" not in tags and context.title:
243 tags["title"] = context.title
244
245 title = tags.get("name") or tags.get("title")
246 if title:
247 tags["og:title"] = tags["twitter:title"] = title
248 tags["twitter:card"] = "summary"
249
250 if "description" not in tags and context.description:
251 tags["description"] = context.description
252
253 description = tags.get("description")
254 if description:
255 tags["og:description"] = tags["twitter:description"] = description
256
257 if "image" not in tags and context.image:
258 tags["image"] = context.image
259
260 image = tags.get("image")
261 if image:
262 tags["og:image"] = tags["twitter:image"] = tags["image"] = frappe.utils.get_url(image)
263 tags['twitter:card'] = "summary_large_image"
264
265 if "author" not in tags and context.author:
266 tags["author"] = context.author
267
268 tags["og:url"] = tags["url"] = frappe.utils.get_url(context.path)
269
270 if "published_on" not in tags and context.published_on:
271 tags["published_on"] = context.published_on
272
273 if "published_on" in tags:
274 tags["datePublished"] = tags["published_on"]
275 del tags["published_on"]
276
277 tags["language"] = frappe.local.lang or "en"
278
279 # Get meta tags from Website Route meta
280 # they can override the defaults set above
281 route = context.path
282 if route == '':
283 # homepage
284 route = frappe.db.get_single_value('Website Settings', 'home_page')
285
286 route_exists = (route
287 and not route.endswith(('.js', '.css'))
288 and frappe.db.exists('Website Route Meta', route))
289
290 if route_exists:
291 website_route_meta = frappe.get_doc('Website Route Meta', route)
292 for meta_tag in website_route_meta.meta_tags:
293 d = meta_tag.get_meta_dict()
294 tags.update(d)
295
296 # update tags in context
297 context.metatags = tags
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/website/context.py b/frappe/website/context.py
--- a/frappe/website/context.py
+++ b/frappe/website/context.py
@@ -58,7 +58,7 @@
ret = module.get_context()
if ret:
context.update(ret)
- except (frappe.PermissionError, frappe.DoesNotExistError, frappe.Redirect):
+ except (frappe.PermissionError, frappe.PageDoesNotExistError, frappe.Redirect):
raise
except:
if not frappe.flags.in_migrate:
| {"golden_diff": "diff --git a/frappe/website/context.py b/frappe/website/context.py\n--- a/frappe/website/context.py\n+++ b/frappe/website/context.py\n@@ -58,7 +58,7 @@\n \t\t\t\t\tret = module.get_context()\n \t\t\t\tif ret:\n \t\t\t\t\tcontext.update(ret)\n-\t\t\texcept (frappe.PermissionError, frappe.DoesNotExistError, frappe.Redirect):\n+\t\t\texcept (frappe.PermissionError, frappe.PageDoesNotExistError, frappe.Redirect):\n \t\t\t\traise\n \t\t\texcept:\n \t\t\t\tif not frappe.flags.in_migrate:\n", "issue": "It is not possible to return 404 from portal pages\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR \ud83d\ude09\r\n-->\r\n\r\n## Description of the issue\r\n\r\nThere is no way to return 404 from the portal pages. \r\n\r\nWhen enquired about it in the _Frappe Framework Public_ Telegram group, @netchampfaris suggested to `raise frappe.PageDoesNotExistError`. But that didn't work either. \r\n\r\nIt looks like the [render.py][1] is handling the `PageDoesNotExistError`, but the [context.py][2] is handling only DoesNotExistError. Because of this, raising none of those exceptions work.\r\n\r\n[1]: https://github.com/frappe/frappe/blob/develop/frappe/website/render.py#L50 \r\n[2]: https://github.com/frappe/frappe/blob/develop/frappe/website/context.py#L61\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\n(paste here)\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Create a portal page with `raise frappe.PageDoesNotExist()` in the `get_context` function\r\n2. Open that portal page in the browser\r\n3. You'll see a portal page (or 500 if a variable that is defined after the raise statement is used) instead of 404\r\n\r\n### Observed result\r\n\r\na portal page with 200 OK (or 500 if a variable that is defined after the raise statement is used)\r\n\r\n### Expected result\r\n\r\n404 Not Found\r\n\r\n### Stacktrace / full error message\r\n\r\nNot applicable.\r\n\r\n## Additional information\r\n\r\nOS version / distribution: Mac OS X 11.2.3\r\n\r\n```\r\n$ bench version\r\ncommunity 0.0.1\r\nfrappe 13.x.x-develop\r\n```\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
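With the one-line change above, an exception like the one in the hypothetical `www/secret_page.py` sketched earlier is re-raised by `update_controller_context` instead of being swallowed by the bare `except:`, so the existing 404 handling in `render.py` can take over. A rough manual check, assuming a local bench serving the site on port 8000 and that hypothetical page installed:

```python
# Assumptions: local bench on port 8000, site "mysite.local", and the sketched
# www/secret_page.py present in an installed app.
import requests

resp = requests.get("http://mysite.local:8000/secret_page",
                    params={"name": "does-not-exist"})
print(resp.status_code)  # expected: 404 once the patch is applied
```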
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe, os, json\n\nfrom frappe.website.doctype.website_settings.website_settings import get_website_settings\nfrom frappe.website.router import get_page_context\nfrom frappe.model.document import Document\n\ndef get_context(path, args=None):\n\tif args and args.source:\n\t\tcontext = args\n\telse:\n\t\tcontext = get_page_context(path)\n\t\tif args:\n\t\t\tcontext.update(args)\n\n\tif hasattr(frappe.local, 'request'):\n\t\t# for <body data-path=\"\"> (remove leading slash)\n\t\t# path could be overriden in render.resolve_from_map\n\t\tcontext[\"path\"] = frappe.local.request.path.strip('/ ')\n\telse:\n\t\tcontext[\"path\"] = path\n\n\tcontext.canonical = frappe.utils.get_url(frappe.utils.escape_html(context.path))\n\tcontext.route = context.path\n\tcontext = build_context(context)\n\n\t# set using frappe.respond_as_web_page\n\tif hasattr(frappe.local, 'response') and frappe.local.response.get('context'):\n\t\tcontext.update(frappe.local.response.context)\n\n\t# to be able to inspect the context dict\n\t# Use the macro \"inspect\" from macros.html\n\tcontext._context_dict = context\n\n\tcontext.developer_mode = frappe.conf.developer_mode\n\n\treturn context\n\ndef update_controller_context(context, controller):\n\tmodule = frappe.get_module(controller)\n\n\tif module:\n\t\t# get config fields\n\t\tfor prop in (\"base_template_path\", \"template\", \"no_cache\", \"sitemap\",\n\t\t\t\"condition_field\"):\n\t\t\tif hasattr(module, prop):\n\t\t\t\tcontext[prop] = getattr(module, prop)\n\n\t\tif hasattr(module, \"get_context\"):\n\t\t\timport inspect\n\t\t\ttry:\n\t\t\t\tif inspect.getargspec(module.get_context).args:\n\t\t\t\t\tret = module.get_context(context)\n\t\t\t\telse:\n\t\t\t\t\tret = module.get_context()\n\t\t\t\tif ret:\n\t\t\t\t\tcontext.update(ret)\n\t\t\texcept (frappe.PermissionError, frappe.DoesNotExistError, frappe.Redirect):\n\t\t\t\traise\n\t\t\texcept:\n\t\t\t\tif not frappe.flags.in_migrate:\n\t\t\t\t\tfrappe.errprint(frappe.utils.get_traceback())\n\n\t\tif hasattr(module, \"get_children\"):\n\t\t\tcontext.children = module.get_children(context)\n\n\ndef build_context(context):\n\t\"\"\"get_context method of doc or module is supposed to render\n\t\tcontent templates and push it into context\"\"\"\n\tcontext = frappe._dict(context)\n\n\tif not \"url_prefix\" in context:\n\t\tcontext.url_prefix = \"\"\n\n\tif context.url_prefix and context.url_prefix[-1]!='/':\n\t\tcontext.url_prefix += '/'\n\n\t# for backward compatibility\n\tcontext.docs_base_url = '/docs'\n\n\tcontext.update(get_website_settings(context))\n\tcontext.update(frappe.local.conf.get(\"website_context\") or {})\n\n\t# provide doc\n\tif context.doc:\n\t\tcontext.update(context.doc.as_dict())\n\t\tcontext.update(context.doc.get_website_properties())\n\n\t\tif not context.template:\n\t\t\tcontext.template = context.doc.meta.get_web_template()\n\n\t\tif hasattr(context.doc, \"get_context\"):\n\t\t\tret = context.doc.get_context(context)\n\n\t\t\tif ret:\n\t\t\t\tcontext.update(ret)\n\n\t\tfor prop in (\"no_cache\", \"sitemap\"):\n\t\t\tif not prop in context:\n\t\t\t\tcontext[prop] = getattr(context.doc, prop, False)\n\n\telif context.controller:\n\t\t# controller based context\n\t\tupdate_controller_context(context, context.controller)\n\n\t\t# controller context extensions\n\t\tcontext_controller_hooks = frappe.get_hooks(\"extend_website_page_controller_context\") or {}\n\t\tfor controller, extension in context_controller_hooks.items():\n\t\t\tif 
isinstance(extension, list):\n\t\t\t\tfor ext in extension:\n\t\t\t\t\tif controller == context.controller:\n\t\t\t\t\t\tupdate_controller_context(context, ext)\n\t\t\telse:\n\t\t\t\tupdate_controller_context(context, extension)\n\n\tadd_metatags(context)\n\tadd_sidebar_and_breadcrumbs(context)\n\n\t# determine templates to be used\n\tif not context.base_template_path:\n\t\tapp_base = frappe.get_hooks(\"base_template\")\n\t\tcontext.base_template_path = app_base[-1] if app_base else \"templates/base.html\"\n\n\tif context.title_prefix and context.title and not context.title.startswith(context.title_prefix):\n\t\tcontext.title = '{0} - {1}'.format(context.title_prefix, context.title)\n\n\t# apply context from hooks\n\tupdate_website_context = frappe.get_hooks('update_website_context')\n\tfor method in update_website_context:\n\t\tvalues = frappe.get_attr(method)(context)\n\t\tif values:\n\t\t\tcontext.update(values)\n\n\treturn context\n\ndef load_sidebar(context, sidebar_json_path):\n\twith open(sidebar_json_path, 'r') as sidebarfile:\n\t\ttry:\n\t\t\tsidebar_json = sidebarfile.read()\n\t\t\tcontext.sidebar_items = json.loads(sidebar_json)\n\t\t\tcontext.show_sidebar = 1\n\t\texcept json.decoder.JSONDecodeError:\n\t\t\tfrappe.throw('Invalid Sidebar JSON at ' + sidebar_json_path)\n\ndef get_sidebar_json_path(path, look_for=False):\n\t'''\n\t\tGet _sidebar.json path from directory path\n\n\t\t:param path: path of the current diretory\n\t\t:param look_for: if True, look for _sidebar.json going upwards from given path\n\n\t\t:return: _sidebar.json path\n\t'''\n\tif os.path.split(path)[1] == 'www' or path == '/' or not path:\n\t\treturn ''\n\n\tsidebar_json_path = os.path.join(path, '_sidebar.json')\n\tif os.path.exists(sidebar_json_path):\n\t\treturn sidebar_json_path\n\telse:\n\t\tif look_for:\n\t\t\treturn get_sidebar_json_path(os.path.split(path)[0], look_for)\n\t\telse:\n\t\t\treturn ''\n\ndef add_sidebar_and_breadcrumbs(context):\n\t'''Add sidebar and breadcrumbs to context'''\n\tfrom frappe.website.router import get_page_info_from_template\n\tif context.show_sidebar:\n\t\tcontext.no_cache = 1\n\t\tadd_sidebar_data(context)\n\telse:\n\t\tif context.basepath:\n\t\t\thooks = frappe.get_hooks('look_for_sidebar_json')\n\t\t\tlook_for_sidebar_json = hooks[0] if hooks else 0\n\t\t\tsidebar_json_path = get_sidebar_json_path(\n\t\t\t\tcontext.basepath,\n\t\t\t\tlook_for_sidebar_json\n\t\t\t)\n\t\t\tif sidebar_json_path:\n\t\t\t\tload_sidebar(context, sidebar_json_path)\n\n\tif context.add_breadcrumbs and not context.parents:\n\t\tif context.basepath:\n\t\t\tparent_path = os.path.dirname(context.path).rstrip('/')\n\t\t\tpage_info = get_page_info_from_template(parent_path)\n\t\t\tif page_info:\n\t\t\t\tcontext.parents = [dict(route=parent_path, title=page_info.title)]\n\ndef add_sidebar_data(context):\n\tfrom frappe.utils.user import get_fullname_and_avatar\n\timport frappe.www.list\n\n\tif context.show_sidebar and context.website_sidebar:\n\t\tcontext.sidebar_items = frappe.get_all('Website Sidebar Item',\n\t\t\tfilters=dict(parent=context.website_sidebar), fields=['title', 'route', '`group`'],\n\t\t\torder_by='idx asc')\n\n\tif not context.sidebar_items:\n\t\tsidebar_items = frappe.cache().hget('portal_menu_items', frappe.session.user)\n\t\tif sidebar_items == None:\n\t\t\tsidebar_items = []\n\t\t\troles = frappe.get_roles()\n\t\t\tportal_settings = frappe.get_doc('Portal Settings', 'Portal Settings')\n\n\t\t\tdef add_items(sidebar_items, items):\n\t\t\t\tfor d in items:\n\t\t\t\t\tif 
d.get('enabled') and ((not d.get('role')) or d.get('role') in roles):\n\t\t\t\t\t\tsidebar_items.append(d.as_dict() if isinstance(d, Document) else d)\n\n\t\t\tif not portal_settings.hide_standard_menu:\n\t\t\t\tadd_items(sidebar_items, portal_settings.get('menu'))\n\n\t\t\tif portal_settings.custom_menu:\n\t\t\t\tadd_items(sidebar_items, portal_settings.get('custom_menu'))\n\n\t\t\titems_via_hooks = frappe.get_hooks('portal_menu_items')\n\t\t\tif items_via_hooks:\n\t\t\t\tfor i in items_via_hooks: i['enabled'] = 1\n\t\t\t\tadd_items(sidebar_items, items_via_hooks)\n\n\t\t\tfrappe.cache().hset('portal_menu_items', frappe.session.user, sidebar_items)\n\n\t\tcontext.sidebar_items = sidebar_items\n\n\tinfo = get_fullname_and_avatar(frappe.session.user)\n\tcontext[\"fullname\"] = info.fullname\n\tcontext[\"user_image\"] = info.avatar\n\tcontext[\"user\"] = info.name\n\n\ndef add_metatags(context):\n\ttags = frappe._dict(context.get(\"metatags\") or {})\n\n\tif \"og:type\" not in tags:\n\t\ttags[\"og:type\"] = \"article\"\n\n\tif \"title\" not in tags and context.title:\n\t\ttags[\"title\"] = context.title\n\n\ttitle = tags.get(\"name\") or tags.get(\"title\")\n\tif title:\n\t\ttags[\"og:title\"] = tags[\"twitter:title\"] = title\n\t\ttags[\"twitter:card\"] = \"summary\"\n\n\tif \"description\" not in tags and context.description:\n\t\ttags[\"description\"] = context.description\n\n\tdescription = tags.get(\"description\")\n\tif description:\n\t\ttags[\"og:description\"] = tags[\"twitter:description\"] = description\n\n\tif \"image\" not in tags and context.image:\n\t\ttags[\"image\"] = context.image\n\n\timage = tags.get(\"image\")\n\tif image:\n\t\ttags[\"og:image\"] = tags[\"twitter:image\"] = tags[\"image\"] = frappe.utils.get_url(image)\n\t\ttags['twitter:card'] = \"summary_large_image\"\n\n\tif \"author\" not in tags and context.author:\n\t\ttags[\"author\"] = context.author\n\n\ttags[\"og:url\"] = tags[\"url\"] = frappe.utils.get_url(context.path)\n\n\tif \"published_on\" not in tags and context.published_on:\n\t\ttags[\"published_on\"] = context.published_on\n\n\tif \"published_on\" in tags:\n\t\ttags[\"datePublished\"] = tags[\"published_on\"]\n\t\tdel tags[\"published_on\"]\n\n\ttags[\"language\"] = frappe.local.lang or \"en\"\n\n\t# Get meta tags from Website Route meta\n\t# they can override the defaults set above\n\troute = context.path\n\tif route == '':\n\t\t# homepage\n\t\troute = frappe.db.get_single_value('Website Settings', 'home_page')\n\n\troute_exists = (route\n\t\tand not route.endswith(('.js', '.css'))\n\t\tand frappe.db.exists('Website Route Meta', route))\n\n\tif route_exists:\n\t\twebsite_route_meta = frappe.get_doc('Website Route Meta', route)\n\t\tfor meta_tag in website_route_meta.meta_tags:\n\t\t\td = meta_tag.get_meta_dict()\n\t\t\ttags.update(d)\n\n\t# update tags in context\n\tcontext.metatags = tags\n", "path": "frappe/website/context.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe, os, json\n\nfrom frappe.website.doctype.website_settings.website_settings import get_website_settings\nfrom frappe.website.router import get_page_context\nfrom frappe.model.document import Document\n\ndef get_context(path, args=None):\n\tif args and args.source:\n\t\tcontext = args\n\telse:\n\t\tcontext = get_page_context(path)\n\t\tif args:\n\t\t\tcontext.update(args)\n\n\tif hasattr(frappe.local, 'request'):\n\t\t# for <body data-path=\"\"> (remove leading slash)\n\t\t# path could be overriden in render.resolve_from_map\n\t\tcontext[\"path\"] = frappe.local.request.path.strip('/ ')\n\telse:\n\t\tcontext[\"path\"] = path\n\n\tcontext.canonical = frappe.utils.get_url(frappe.utils.escape_html(context.path))\n\tcontext.route = context.path\n\tcontext = build_context(context)\n\n\t# set using frappe.respond_as_web_page\n\tif hasattr(frappe.local, 'response') and frappe.local.response.get('context'):\n\t\tcontext.update(frappe.local.response.context)\n\n\t# to be able to inspect the context dict\n\t# Use the macro \"inspect\" from macros.html\n\tcontext._context_dict = context\n\n\tcontext.developer_mode = frappe.conf.developer_mode\n\n\treturn context\n\ndef update_controller_context(context, controller):\n\tmodule = frappe.get_module(controller)\n\n\tif module:\n\t\t# get config fields\n\t\tfor prop in (\"base_template_path\", \"template\", \"no_cache\", \"sitemap\",\n\t\t\t\"condition_field\"):\n\t\t\tif hasattr(module, prop):\n\t\t\t\tcontext[prop] = getattr(module, prop)\n\n\t\tif hasattr(module, \"get_context\"):\n\t\t\timport inspect\n\t\t\ttry:\n\t\t\t\tif inspect.getargspec(module.get_context).args:\n\t\t\t\t\tret = module.get_context(context)\n\t\t\t\telse:\n\t\t\t\t\tret = module.get_context()\n\t\t\t\tif ret:\n\t\t\t\t\tcontext.update(ret)\n\t\t\texcept (frappe.PermissionError, frappe.PageDoesNotExistError, frappe.Redirect):\n\t\t\t\traise\n\t\t\texcept:\n\t\t\t\tif not frappe.flags.in_migrate:\n\t\t\t\t\tfrappe.errprint(frappe.utils.get_traceback())\n\n\t\tif hasattr(module, \"get_children\"):\n\t\t\tcontext.children = module.get_children(context)\n\n\ndef build_context(context):\n\t\"\"\"get_context method of doc or module is supposed to render\n\t\tcontent templates and push it into context\"\"\"\n\tcontext = frappe._dict(context)\n\n\tif not \"url_prefix\" in context:\n\t\tcontext.url_prefix = \"\"\n\n\tif context.url_prefix and context.url_prefix[-1]!='/':\n\t\tcontext.url_prefix += '/'\n\n\t# for backward compatibility\n\tcontext.docs_base_url = '/docs'\n\n\tcontext.update(get_website_settings(context))\n\tcontext.update(frappe.local.conf.get(\"website_context\") or {})\n\n\t# provide doc\n\tif context.doc:\n\t\tcontext.update(context.doc.as_dict())\n\t\tcontext.update(context.doc.get_website_properties())\n\n\t\tif not context.template:\n\t\t\tcontext.template = context.doc.meta.get_web_template()\n\n\t\tif hasattr(context.doc, \"get_context\"):\n\t\t\tret = context.doc.get_context(context)\n\n\t\t\tif ret:\n\t\t\t\tcontext.update(ret)\n\n\t\tfor prop in (\"no_cache\", \"sitemap\"):\n\t\t\tif not prop in context:\n\t\t\t\tcontext[prop] = getattr(context.doc, prop, False)\n\n\telif context.controller:\n\t\t# controller based context\n\t\tupdate_controller_context(context, context.controller)\n\n\t\t# controller context extensions\n\t\tcontext_controller_hooks = frappe.get_hooks(\"extend_website_page_controller_context\") or {}\n\t\tfor controller, extension in 
context_controller_hooks.items():\n\t\t\tif isinstance(extension, list):\n\t\t\t\tfor ext in extension:\n\t\t\t\t\tif controller == context.controller:\n\t\t\t\t\t\tupdate_controller_context(context, ext)\n\t\t\telse:\n\t\t\t\tupdate_controller_context(context, extension)\n\n\tadd_metatags(context)\n\tadd_sidebar_and_breadcrumbs(context)\n\n\t# determine templates to be used\n\tif not context.base_template_path:\n\t\tapp_base = frappe.get_hooks(\"base_template\")\n\t\tcontext.base_template_path = app_base[-1] if app_base else \"templates/base.html\"\n\n\tif context.title_prefix and context.title and not context.title.startswith(context.title_prefix):\n\t\tcontext.title = '{0} - {1}'.format(context.title_prefix, context.title)\n\n\t# apply context from hooks\n\tupdate_website_context = frappe.get_hooks('update_website_context')\n\tfor method in update_website_context:\n\t\tvalues = frappe.get_attr(method)(context)\n\t\tif values:\n\t\t\tcontext.update(values)\n\n\treturn context\n\ndef load_sidebar(context, sidebar_json_path):\n\twith open(sidebar_json_path, 'r') as sidebarfile:\n\t\ttry:\n\t\t\tsidebar_json = sidebarfile.read()\n\t\t\tcontext.sidebar_items = json.loads(sidebar_json)\n\t\t\tcontext.show_sidebar = 1\n\t\texcept json.decoder.JSONDecodeError:\n\t\t\tfrappe.throw('Invalid Sidebar JSON at ' + sidebar_json_path)\n\ndef get_sidebar_json_path(path, look_for=False):\n\t'''\n\t\tGet _sidebar.json path from directory path\n\n\t\t:param path: path of the current diretory\n\t\t:param look_for: if True, look for _sidebar.json going upwards from given path\n\n\t\t:return: _sidebar.json path\n\t'''\n\tif os.path.split(path)[1] == 'www' or path == '/' or not path:\n\t\treturn ''\n\n\tsidebar_json_path = os.path.join(path, '_sidebar.json')\n\tif os.path.exists(sidebar_json_path):\n\t\treturn sidebar_json_path\n\telse:\n\t\tif look_for:\n\t\t\treturn get_sidebar_json_path(os.path.split(path)[0], look_for)\n\t\telse:\n\t\t\treturn ''\n\ndef add_sidebar_and_breadcrumbs(context):\n\t'''Add sidebar and breadcrumbs to context'''\n\tfrom frappe.website.router import get_page_info_from_template\n\tif context.show_sidebar:\n\t\tcontext.no_cache = 1\n\t\tadd_sidebar_data(context)\n\telse:\n\t\tif context.basepath:\n\t\t\thooks = frappe.get_hooks('look_for_sidebar_json')\n\t\t\tlook_for_sidebar_json = hooks[0] if hooks else 0\n\t\t\tsidebar_json_path = get_sidebar_json_path(\n\t\t\t\tcontext.basepath,\n\t\t\t\tlook_for_sidebar_json\n\t\t\t)\n\t\t\tif sidebar_json_path:\n\t\t\t\tload_sidebar(context, sidebar_json_path)\n\n\tif context.add_breadcrumbs and not context.parents:\n\t\tif context.basepath:\n\t\t\tparent_path = os.path.dirname(context.path).rstrip('/')\n\t\t\tpage_info = get_page_info_from_template(parent_path)\n\t\t\tif page_info:\n\t\t\t\tcontext.parents = [dict(route=parent_path, title=page_info.title)]\n\ndef add_sidebar_data(context):\n\tfrom frappe.utils.user import get_fullname_and_avatar\n\timport frappe.www.list\n\n\tif context.show_sidebar and context.website_sidebar:\n\t\tcontext.sidebar_items = frappe.get_all('Website Sidebar Item',\n\t\t\tfilters=dict(parent=context.website_sidebar), fields=['title', 'route', '`group`'],\n\t\t\torder_by='idx asc')\n\n\tif not context.sidebar_items:\n\t\tsidebar_items = frappe.cache().hget('portal_menu_items', frappe.session.user)\n\t\tif sidebar_items == None:\n\t\t\tsidebar_items = []\n\t\t\troles = frappe.get_roles()\n\t\t\tportal_settings = frappe.get_doc('Portal Settings', 'Portal Settings')\n\n\t\t\tdef add_items(sidebar_items, 
items):\n\t\t\t\tfor d in items:\n\t\t\t\t\tif d.get('enabled') and ((not d.get('role')) or d.get('role') in roles):\n\t\t\t\t\t\tsidebar_items.append(d.as_dict() if isinstance(d, Document) else d)\n\n\t\t\tif not portal_settings.hide_standard_menu:\n\t\t\t\tadd_items(sidebar_items, portal_settings.get('menu'))\n\n\t\t\tif portal_settings.custom_menu:\n\t\t\t\tadd_items(sidebar_items, portal_settings.get('custom_menu'))\n\n\t\t\titems_via_hooks = frappe.get_hooks('portal_menu_items')\n\t\t\tif items_via_hooks:\n\t\t\t\tfor i in items_via_hooks: i['enabled'] = 1\n\t\t\t\tadd_items(sidebar_items, items_via_hooks)\n\n\t\t\tfrappe.cache().hset('portal_menu_items', frappe.session.user, sidebar_items)\n\n\t\tcontext.sidebar_items = sidebar_items\n\n\tinfo = get_fullname_and_avatar(frappe.session.user)\n\tcontext[\"fullname\"] = info.fullname\n\tcontext[\"user_image\"] = info.avatar\n\tcontext[\"user\"] = info.name\n\n\ndef add_metatags(context):\n\ttags = frappe._dict(context.get(\"metatags\") or {})\n\n\tif \"og:type\" not in tags:\n\t\ttags[\"og:type\"] = \"article\"\n\n\tif \"title\" not in tags and context.title:\n\t\ttags[\"title\"] = context.title\n\n\ttitle = tags.get(\"name\") or tags.get(\"title\")\n\tif title:\n\t\ttags[\"og:title\"] = tags[\"twitter:title\"] = title\n\t\ttags[\"twitter:card\"] = \"summary\"\n\n\tif \"description\" not in tags and context.description:\n\t\ttags[\"description\"] = context.description\n\n\tdescription = tags.get(\"description\")\n\tif description:\n\t\ttags[\"og:description\"] = tags[\"twitter:description\"] = description\n\n\tif \"image\" not in tags and context.image:\n\t\ttags[\"image\"] = context.image\n\n\timage = tags.get(\"image\")\n\tif image:\n\t\ttags[\"og:image\"] = tags[\"twitter:image\"] = tags[\"image\"] = frappe.utils.get_url(image)\n\t\ttags['twitter:card'] = \"summary_large_image\"\n\n\tif \"author\" not in tags and context.author:\n\t\ttags[\"author\"] = context.author\n\n\ttags[\"og:url\"] = tags[\"url\"] = frappe.utils.get_url(context.path)\n\n\tif \"published_on\" not in tags and context.published_on:\n\t\ttags[\"published_on\"] = context.published_on\n\n\tif \"published_on\" in tags:\n\t\ttags[\"datePublished\"] = tags[\"published_on\"]\n\t\tdel tags[\"published_on\"]\n\n\ttags[\"language\"] = frappe.local.lang or \"en\"\n\n\t# Get meta tags from Website Route meta\n\t# they can override the defaults set above\n\troute = context.path\n\tif route == '':\n\t\t# homepage\n\t\troute = frappe.db.get_single_value('Website Settings', 'home_page')\n\n\troute_exists = (route\n\t\tand not route.endswith(('.js', '.css'))\n\t\tand frappe.db.exists('Website Route Meta', route))\n\n\tif route_exists:\n\t\twebsite_route_meta = frappe.get_doc('Website Route Meta', route)\n\t\tfor meta_tag in website_route_meta.meta_tags:\n\t\t\td = meta_tag.get_meta_dict()\n\t\t\ttags.update(d)\n\n\t# update tags in context\n\tcontext.metatags = tags\n", "path": "frappe/website/context.py"}]} | 3,935 | 118 |
gh_patches_debug_24801 | rasdani/github-patches | git_diff | mirumee__ariadne-158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If parse_literal is not present, try falling back to `parse_value(ast.value)`
Following idea was brought up in discussion for #24:
> Maybe we could default to calling parse_value with ast.value when only one function is provided?
This requires further study. `IntValue`, `StringValue` and friends are obvious to deal with, but complex types like `ListValue` may require some extra unpacking magic.
Still, if it is possible to pull off, it could be an excellent convenience for developers creating custom scalars, saving the need to potentially maintain two very similar implementations, one doing `isinstance(value, basestr)` and the other `isinstance(value, StringValue)`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/scalars.py`
Content:
```
1 from typing import Optional, cast
2
3 from graphql.type import (
4 GraphQLNamedType,
5 GraphQLScalarLiteralParser,
6 GraphQLScalarSerializer,
7 GraphQLScalarType,
8 GraphQLScalarValueParser,
9 GraphQLSchema,
10 )
11
12 from .types import SchemaBindable
13
14
15 class ScalarType(SchemaBindable):
16 _serialize: Optional[GraphQLScalarSerializer]
17 _parse_value: Optional[GraphQLScalarValueParser]
18 _parse_literal: Optional[GraphQLScalarLiteralParser]
19
20 def __init__(
21 self,
22 name: str,
23 *,
24 serializer: GraphQLScalarSerializer = None,
25 value_parser: GraphQLScalarValueParser = None,
26 literal_parser: GraphQLScalarLiteralParser = None,
27 ) -> None:
28 self.name = name
29 self._serialize = serializer
30 self._parse_value = value_parser
31 self._parse_literal = literal_parser
32
33 def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:
34 self._serialize = f
35 return f
36
37 def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
38 self._parse_value = f
39 return f
40
41 def set_literal_parser(
42 self, f: GraphQLScalarLiteralParser
43 ) -> GraphQLScalarLiteralParser:
44 self._parse_literal = f
45 return f
46
47 # Alias above setters for consistent decorator API
48 serializer = set_serializer
49 value_parser = set_value_parser
50 literal_parser = set_literal_parser
51
52 def bind_to_schema(self, schema: GraphQLSchema) -> None:
53 graphql_type = schema.type_map.get(self.name)
54 self.validate_graphql_type(graphql_type)
55 graphql_type = cast(GraphQLScalarType, graphql_type)
56
57 if self._serialize:
58 # See mypy bug https://github.com/python/mypy/issues/2427
59 graphql_type.serialize = self._serialize # type: ignore
60 if self._parse_value:
61 graphql_type.parse_value = self._parse_value
62 if self._parse_literal:
63 graphql_type.parse_literal = self._parse_literal
64
65 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
66 if not graphql_type:
67 raise ValueError("Scalar %s is not defined in the schema" % self.name)
68 if not isinstance(graphql_type, GraphQLScalarType):
69 raise ValueError(
70 "%s is defined in the schema, but it is instance of %s (expected %s)"
71 % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/scalars.py b/ariadne/scalars.py
--- a/ariadne/scalars.py
+++ b/ariadne/scalars.py
@@ -1,5 +1,11 @@
from typing import Optional, cast
+from graphql.language.ast import (
+ BooleanValueNode,
+ FloatValueNode,
+ IntValueNode,
+ StringValueNode,
+)
from graphql.type import (
GraphQLNamedType,
GraphQLScalarLiteralParser,
@@ -36,6 +42,8 @@
def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:
self._parse_value = f
+ if not self._parse_literal:
+ self._parse_literal = create_default_literal_parser(f)
return f
def set_literal_parser(
@@ -70,3 +78,15 @@
"%s is defined in the schema, but it is instance of %s (expected %s)"
% (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)
)
+
+
+SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)
+
+
+def create_default_literal_parser(
+ value_parser: GraphQLScalarValueParser
+) -> GraphQLScalarLiteralParser:
+ def default_literal_parser(ast):
+ return value_parser(ast.value)
+
+ return default_literal_parser
| {"golden_diff": "diff --git a/ariadne/scalars.py b/ariadne/scalars.py\n--- a/ariadne/scalars.py\n+++ b/ariadne/scalars.py\n@@ -1,5 +1,11 @@\n from typing import Optional, cast\n \n+from graphql.language.ast import (\n+ BooleanValueNode,\n+ FloatValueNode,\n+ IntValueNode,\n+ StringValueNode,\n+)\n from graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n@@ -36,6 +42,8 @@\n \n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n+ if not self._parse_literal:\n+ self._parse_literal = create_default_literal_parser(f)\n return f\n \n def set_literal_parser(\n@@ -70,3 +78,15 @@\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n+\n+\n+SCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n+\n+\n+def create_default_literal_parser(\n+ value_parser: GraphQLScalarValueParser\n+) -> GraphQLScalarLiteralParser:\n+ def default_literal_parser(ast):\n+ return value_parser(ast.value)\n+\n+ return default_literal_parser\n", "issue": "If parse_literal is not present try fallbacking to `parse_value(ast.value)`\nFollowing idea was brought up in discussion for #24:\r\n\r\n> Maybe we could default to calling parse_value with ast.value when only one function is provided?\r\n\r\nThis requires further study. `IntValue`, `StringValue` and friends are obvious to deal with, but but complex types like `ListValue` may require some extra unpacking magic.\r\n\r\nStill, if it is possible to pull off, it could be an excellent convenience for developers creating custom scalars, saving the need for potentially maintaining two very simiiar implementations, one doing `isinstance(value, basestr)` and other `isinstance(value, StringValue)`.\n", "before_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if 
self._parse_value:\n graphql_type.parse_value = self._parse_value\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n", "path": "ariadne/scalars.py"}], "after_files": [{"content": "from typing import Optional, cast\n\nfrom graphql.language.ast import (\n BooleanValueNode,\n FloatValueNode,\n IntValueNode,\n StringValueNode,\n)\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLScalarLiteralParser,\n GraphQLScalarSerializer,\n GraphQLScalarType,\n GraphQLScalarValueParser,\n GraphQLSchema,\n)\n\nfrom .types import SchemaBindable\n\n\nclass ScalarType(SchemaBindable):\n _serialize: Optional[GraphQLScalarSerializer]\n _parse_value: Optional[GraphQLScalarValueParser]\n _parse_literal: Optional[GraphQLScalarLiteralParser]\n\n def __init__(\n self,\n name: str,\n *,\n serializer: GraphQLScalarSerializer = None,\n value_parser: GraphQLScalarValueParser = None,\n literal_parser: GraphQLScalarLiteralParser = None,\n ) -> None:\n self.name = name\n self._serialize = serializer\n self._parse_value = value_parser\n self._parse_literal = literal_parser\n\n def set_serializer(self, f: GraphQLScalarSerializer) -> GraphQLScalarSerializer:\n self._serialize = f\n return f\n\n def set_value_parser(self, f: GraphQLScalarValueParser) -> GraphQLScalarValueParser:\n self._parse_value = f\n if not self._parse_literal:\n self._parse_literal = create_default_literal_parser(f)\n return f\n\n def set_literal_parser(\n self, f: GraphQLScalarLiteralParser\n ) -> GraphQLScalarLiteralParser:\n self._parse_literal = f\n return f\n\n # Alias above setters for consistent decorator API\n serializer = set_serializer\n value_parser = set_value_parser\n literal_parser = set_literal_parser\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLScalarType, graphql_type)\n\n if self._serialize:\n # See mypy bug https://github.com/python/mypy/issues/2427\n graphql_type.serialize = self._serialize # type: ignore\n if self._parse_value:\n graphql_type.parse_value = self._parse_value\n if self._parse_literal:\n graphql_type.parse_literal = self._parse_literal\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Scalar %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLScalarType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLScalarType.__name__)\n )\n\n\nSCALAR_AST_NODES = (BooleanValueNode, FloatValueNode, IntValueNode, StringValueNode)\n\n\ndef create_default_literal_parser(\n value_parser: GraphQLScalarValueParser\n) -> GraphQLScalarLiteralParser:\n def default_literal_parser(ast):\n return value_parser(ast.value)\n\n return default_literal_parser\n", "path": "ariadne/scalars.py"}]} | 1,080 | 310 |
gh_patches_debug_20645 | rasdani/github-patches | git_diff | Flexget__Flexget-1101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with sabnzbd after upgrade to version 2.0.5
Hi,
Last night I upgraded to version 2.0.5 from 1.2.521. I haven't made any config changes. Everything seems to work except adding downloads to sabnzbd. Reverting back to version 1.2.521 made everything work again.
```
2016-04-27 07:30 CRITICAL sabnzbd usenet Failed to use sabnzbd. Requested http://sabnzbd:8080/sabnzbd/api?nzbname=REL_NAME&apikey=11111&mode=addurl&name=URL_THAT_WORKS
2016-04-27 07:30 CRITICAL sabnzbd usenet Result was: 'Task' object has no attribute 'get'
2016-04-27 07:30 ERROR entry usenet Failed REL_NAME (sabnzbd unreachable)
```
Manually clicking the url does add the nzb to sabnzbd.
This runs in a FreeBSD 10.3 jail using Python 2.7.11 installed and upgraded using pip.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/output/sabnzbd.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import *
3 from future.moves.urllib.parse import urlencode
4
5 import logging
6
7 from flexget import plugin
8 from flexget.event import event
9
10 log = logging.getLogger('sabnzbd')
11
12
13 class OutputSabnzbd(object):
14 """
15 Example::
16
17 sabnzbd:
18 apikey: 123456
19 url: http://localhost/sabnzbd/api?
20 category: movies
21
22 All parameters::
23
24 sabnzbd:
25 apikey: ...
26 url: ...
27 category: ...
28 script: ...
29 pp: ...
30 priority: ...
31 """
32 schema = {
33 'type': 'object',
34 'properties': {
35 'key': {'type': 'string'},
36 'url': {'type': 'string', 'format': 'url'},
37 'category': {'type': 'string'},
38 'script': {'type': 'string'},
39 'pp': {'type': 'string'},
40 'priority': {'type': 'integer'},
41 'password': {'type': 'string'},
42 'username': {'type': 'string'},
43 },
44 'required': ['key', 'url'],
45 'additionalProperties': False,
46 }
47
48 def get_params(self, config):
49 params = {}
50 if 'key' in config:
51 params['apikey'] = config['key']
52 if 'category' in config:
53 params['cat'] = '%s' % config['category']
54 if 'script' in config:
55 params['script'] = config['script']
56 if 'pp' in config:
57 params['pp'] = config['pp']
58 if 'priority' in config:
59 params['priority'] = config['priority']
60 if 'username' in config:
61 params['ma_username'] = config['username']
62 if 'password' in config:
63 params['ma_password'] = config['password']
64 params['mode'] = 'addurl'
65 return params
66
67 def on_task_output(self, task, config):
68 for entry in task.accepted:
69 if task.options.test:
70 log.info('Would add into sabnzbd: %s' % entry['title'])
71 continue
72
73 params = self.get_params(config)
74 # allow overriding the category
75 if 'category' in entry:
76 # Dirty hack over the next few lines to strip out non-ascii
77 # chars. We're going to urlencode this, which causes
78 # serious issues in python2.x if it's not ascii input.
79 params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])
80 params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])
81 # add cleaner nzb name (undocumented api feature)
82 params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])
83
84 request_url = config['url'] + urlencode(params)
85 log.debug('request_url: %s' % request_url)
86 try:
87 response = task.get(request_url)
88 except Exception as e:
89 log.critical('Failed to use sabnzbd. Requested %s' % request_url)
90 log.critical('Result was: %s' % e)
91 entry.fail('sabnzbd unreachable')
92 if task.options.debug:
93 log.exception(e)
94 continue
95
96 if 'error' in response.text.lower():
97 entry.fail(response.text.replace('\n', ''))
98 else:
99 log.info('Added `%s` to SABnzbd' % (entry['title']))
100
101
102 @event('plugin.register')
103 def register_plugin():
104 plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py
--- a/flexget/plugins/output/sabnzbd.py
+++ b/flexget/plugins/output/sabnzbd.py
@@ -6,6 +6,7 @@
from flexget import plugin
from flexget.event import event
+from requests import RequestException
log = logging.getLogger('sabnzbd')
@@ -84,10 +85,10 @@
request_url = config['url'] + urlencode(params)
log.debug('request_url: %s' % request_url)
try:
- response = task.get(request_url)
- except Exception as e:
+ response = task.requests.get(request_url)
+ except RequestException as e:
log.critical('Failed to use sabnzbd. Requested %s' % request_url)
- log.critical('Result was: %s' % e)
+ log.critical('Result was: %s' % e.args[0])
entry.fail('sabnzbd unreachable')
if task.options.debug:
log.exception(e)
| {"golden_diff": "diff --git a/flexget/plugins/output/sabnzbd.py b/flexget/plugins/output/sabnzbd.py\n--- a/flexget/plugins/output/sabnzbd.py\n+++ b/flexget/plugins/output/sabnzbd.py\n@@ -6,6 +6,7 @@\n \n from flexget import plugin\n from flexget.event import event\n+from requests import RequestException\n \n log = logging.getLogger('sabnzbd')\n \n@@ -84,10 +85,10 @@\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n- response = task.get(request_url)\n- except Exception as e:\n+ response = task.requests.get(request_url)\n+ except RequestException as e:\n log.critical('Failed to use sabnzbd. Requested %s' % request_url)\n- log.critical('Result was: %s' % e)\n+ log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n", "issue": "Problem with sabnzbd after upgrade to version 2.0.5\nHi,\n\nLast night I upgraded to version 2.0.5 from 1.2.521. I haven't made any config changes. Everything seems to work except adding downloads to sabnzbd. Reverting back to version 1.2.521 made everything work again.\n\n```\n2016-04-27 07:30 CRITICAL sabnzbd usenet Failed to use sabnzbd. Requested http://sabnzbd:8080/sabnzbd/api?nzbname=REL_NAME&apikey=11111&mode=addurl&name=URL_THAT_WORKS\n2016-04-27 07:30 CRITICAL sabnzbd usenet Result was: 'Task' object has no attribute 'get'\n2016-04-27 07:30 ERROR entry usenet Failed REL_NAME (sabnzbd unreachable) \n```\n\nManually clicking the url does add the nzb to sabznbd. \n\nThis runs in a FreeBSD 10.3 jail using Python 2.7.11 installed and upgraded using pip.\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import *\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n params['mode'] = 'addurl'\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. 
We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.get(request_url)\n except Exception as e:\n log.critical('Failed to use sabnzbd. Requested %s' % request_url)\n log.critical('Result was: %s' % e)\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import *\nfrom future.moves.urllib.parse import urlencode\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom requests import RequestException\n\nlog = logging.getLogger('sabnzbd')\n\n\nclass OutputSabnzbd(object):\n \"\"\"\n Example::\n\n sabnzbd:\n apikey: 123456\n url: http://localhost/sabnzbd/api?\n category: movies\n\n All parameters::\n\n sabnzbd:\n apikey: ...\n url: ...\n category: ...\n script: ...\n pp: ...\n priority: ...\n \"\"\"\n schema = {\n 'type': 'object',\n 'properties': {\n 'key': {'type': 'string'},\n 'url': {'type': 'string', 'format': 'url'},\n 'category': {'type': 'string'},\n 'script': {'type': 'string'},\n 'pp': {'type': 'string'},\n 'priority': {'type': 'integer'},\n 'password': {'type': 'string'},\n 'username': {'type': 'string'},\n },\n 'required': ['key', 'url'],\n 'additionalProperties': False,\n }\n\n def get_params(self, config):\n params = {}\n if 'key' in config:\n params['apikey'] = config['key']\n if 'category' in config:\n params['cat'] = '%s' % config['category']\n if 'script' in config:\n params['script'] = config['script']\n if 'pp' in config:\n params['pp'] = config['pp']\n if 'priority' in config:\n params['priority'] = config['priority']\n if 'username' in config:\n params['ma_username'] = config['username']\n if 'password' in config:\n params['ma_password'] = config['password']\n params['mode'] = 'addurl'\n return params\n\n def on_task_output(self, task, config):\n for entry in task.accepted:\n if task.options.test:\n log.info('Would add into sabnzbd: %s' % entry['title'])\n continue\n\n params = self.get_params(config)\n # allow overriding the category\n if 'category' in entry:\n # Dirty hack over the next few lines to strip out non-ascii\n # chars. We're going to urlencode this, which causes\n # serious issues in python2.x if it's not ascii input.\n params['cat'] = ''.join([x for x in entry['category'] if ord(x) < 128])\n params['name'] = ''.join([x for x in entry['url'] if ord(x) < 128])\n # add cleaner nzb name (undocumented api feature)\n params['nzbname'] = ''.join([x for x in entry['title'] if ord(x) < 128])\n\n request_url = config['url'] + urlencode(params)\n log.debug('request_url: %s' % request_url)\n try:\n response = task.requests.get(request_url)\n except RequestException as e:\n log.critical('Failed to use sabnzbd. 
Requested %s' % request_url)\n log.critical('Result was: %s' % e.args[0])\n entry.fail('sabnzbd unreachable')\n if task.options.debug:\n log.exception(e)\n continue\n\n if 'error' in response.text.lower():\n entry.fail(response.text.replace('\\n', ''))\n else:\n log.info('Added `%s` to SABnzbd' % (entry['title']))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputSabnzbd, 'sabnzbd', api_ver=2)\n", "path": "flexget/plugins/output/sabnzbd.py"}]} | 1,560 | 246 |
gh_patches_debug_5867 | rasdani/github-patches | git_diff | napari__napari-3424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`normalize_dtype` excludes big endian types
## 🐛 Bug
```py
In [457]: from napari.utils._dtype import get_dtype_limits
In [458]: get_dtype_limits(np.dtype('<u2'))
Out[458]: (0, 65535)
In [459]: get_dtype_limits(np.dtype('>u2'))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-459-d109d903c3cf> in <module>
----> 1 get_dtype_limits(np.dtype('>u2'))
~/Dropbox (HMS)/Python/forks/napari/napari/utils/_dtype.py in get_dtype_limits(dtype_spec)
103 info = np.finfo(dtype)
104 else:
--> 105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')
106 return info.min, info.max
TypeError: Unrecognized or non-numeric dtype: >u2
In [460]: np.iinfo('>u2')
Out[460]: iinfo(min=0, max=65535, dtype=>u2)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/_dtype.py`
Content:
```
1 from typing import Tuple, Union
2
3 import numpy as np
4
5 _np_uints = {
6 8: np.uint8,
7 16: np.uint16,
8 32: np.uint32,
9 64: np.uint64,
10 }
11
12 _np_ints = {
13 8: np.int8,
14 16: np.int16,
15 32: np.int32,
16 64: np.int64,
17 }
18
19 _np_floats = {
20 32: np.float32,
21 64: np.float64,
22 }
23
24 _np_complex = {
25 64: np.complex64,
26 128: np.complex128,
27 }
28
29 _np_kinds = {
30 'uint': _np_uints,
31 'int': _np_ints,
32 'float': _np_floats,
33 'complex': _np_complex,
34 }
35
36
37 def _normalize_str_by_bit_depth(dtype_str, kind):
38 if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'
39 return np.dtype(kind).type
40 bit_dict = _np_kinds[kind]
41 if '128' in dtype_str:
42 return bit_dict[128]
43 if '8' in dtype_str:
44 return bit_dict[8]
45 if '16' in dtype_str:
46 return bit_dict[16]
47 if '32' in dtype_str:
48 return bit_dict[32]
49 if '64' in dtype_str:
50 return bit_dict[64]
51
52
53 def normalize_dtype(dtype_spec):
54 """Return a proper NumPy type given ~any duck array dtype.
55
56 Parameters
57 ----------
58 dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc
59 A type that can be interpreted as a NumPy numeric data type, e.g.
60 'uint32', np.uint8, torch.float32, etc.
61
62 Returns
63 -------
64 dtype : numpy.dtype
65 The corresponding dtype.
66
67 Notes
68 -----
69 half-precision floats are not supported.
70 """
71 dtype_str = str(dtype_spec)
72 if 'uint' in dtype_str:
73 return _normalize_str_by_bit_depth(dtype_str, 'uint')
74 if 'int' in dtype_str:
75 return _normalize_str_by_bit_depth(dtype_str, 'int')
76 if 'float' in dtype_str:
77 return _normalize_str_by_bit_depth(dtype_str, 'float')
78 if 'complex' in dtype_str:
79 return _normalize_str_by_bit_depth(dtype_str, 'complex')
80 if 'bool' in dtype_str:
81 return np.bool_
82
83
84 def get_dtype_limits(dtype_spec) -> Tuple[float, float]:
85 """Return machine limits for numeric types.
86
87 Parameters
88 ----------
89 dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc
90 A type that can be interpreted as a NumPy numeric data type, e.g.
91 'uint32', np.uint8, torch.float32, etc.
92
93 Returns
94 -------
95 limits : tuple
96 The smallest/largest numbers expressible by the type.
97 """
98 dtype = normalize_dtype(dtype_spec)
99 info: Union[np.iinfo, np.finfo]
100 if np.issubdtype(dtype, np.integer):
101 info = np.iinfo(dtype)
102 elif dtype and np.issubdtype(dtype, np.floating):
103 info = np.finfo(dtype)
104 else:
105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')
106 return info.min, info.max
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/_dtype.py b/napari/utils/_dtype.py
--- a/napari/utils/_dtype.py
+++ b/napari/utils/_dtype.py
@@ -79,6 +79,11 @@
return _normalize_str_by_bit_depth(dtype_str, 'complex')
if 'bool' in dtype_str:
return np.bool_
+ # If we don't find one of the named dtypes, return the dtype_spec
+ # unchanged. This allows NumPy big endian types to work. See
+ # https://github.com/napari/napari/issues/3421
+ else:
+ return dtype_spec
def get_dtype_limits(dtype_spec) -> Tuple[float, float]:
| {"golden_diff": "diff --git a/napari/utils/_dtype.py b/napari/utils/_dtype.py\n--- a/napari/utils/_dtype.py\n+++ b/napari/utils/_dtype.py\n@@ -79,6 +79,11 @@\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n+ # If we don't find one of the named dtypes, return the dtype_spec\n+ # unchanged. This allows NumPy big endian types to work. See\n+ # https://github.com/napari/napari/issues/3421\n+ else:\n+ return dtype_spec\n \n \n def get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n", "issue": "`normalize_dtype` excludes big endian types\n## \ud83d\udc1b Bug\r\n```py\r\nIn [457]: from napari.utils._dtype import get_dtype_limits\r\n\r\nIn [458]: get_dtype_limits(np.dtype('<u2'))\r\nOut[458]: (0, 65535)\r\n\r\nIn [459]: get_dtype_limits(np.dtype('>u2'))\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-459-d109d903c3cf> in <module>\r\n----> 1 get_dtype_limits(np.dtype('>u2'))\r\n\r\n~/Dropbox (HMS)/Python/forks/napari/napari/utils/_dtype.py in get_dtype_limits(dtype_spec)\r\n 103 info = np.finfo(dtype)\r\n 104 else:\r\n--> 105 raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\r\n 106 return info.min, info.max\r\n\r\nTypeError: Unrecognized or non-numeric dtype: >u2\r\n\r\nIn [460]: np.iinfo('>u2')\r\nOut[460]: iinfo(min=0, max=65535, dtype=>u2)\r\n```\n", "before_files": [{"content": "from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n 
limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n", "path": "napari/utils/_dtype.py"}], "after_files": [{"content": "from typing import Tuple, Union\n\nimport numpy as np\n\n_np_uints = {\n 8: np.uint8,\n 16: np.uint16,\n 32: np.uint32,\n 64: np.uint64,\n}\n\n_np_ints = {\n 8: np.int8,\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\n_np_floats = {\n 32: np.float32,\n 64: np.float64,\n}\n\n_np_complex = {\n 64: np.complex64,\n 128: np.complex128,\n}\n\n_np_kinds = {\n 'uint': _np_uints,\n 'int': _np_ints,\n 'float': _np_floats,\n 'complex': _np_complex,\n}\n\n\ndef _normalize_str_by_bit_depth(dtype_str, kind):\n if not any(str.isdigit(c) for c in dtype_str): # Python 'int' or 'float'\n return np.dtype(kind).type\n bit_dict = _np_kinds[kind]\n if '128' in dtype_str:\n return bit_dict[128]\n if '8' in dtype_str:\n return bit_dict[8]\n if '16' in dtype_str:\n return bit_dict[16]\n if '32' in dtype_str:\n return bit_dict[32]\n if '64' in dtype_str:\n return bit_dict[64]\n\n\ndef normalize_dtype(dtype_spec):\n \"\"\"Return a proper NumPy type given ~any duck array dtype.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n dtype : numpy.dtype\n The corresponding dtype.\n\n Notes\n -----\n half-precision floats are not supported.\n \"\"\"\n dtype_str = str(dtype_spec)\n if 'uint' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'uint')\n if 'int' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'int')\n if 'float' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'float')\n if 'complex' in dtype_str:\n return _normalize_str_by_bit_depth(dtype_str, 'complex')\n if 'bool' in dtype_str:\n return np.bool_\n # If we don't find one of the named dtypes, return the dtype_spec\n # unchanged. This allows NumPy big endian types to work. See\n # https://github.com/napari/napari/issues/3421\n else:\n return dtype_spec\n\n\ndef get_dtype_limits(dtype_spec) -> Tuple[float, float]:\n \"\"\"Return machine limits for numeric types.\n\n Parameters\n ----------\n dtype_spec : numpy dtype, numpy type, torch dtype, tensorstore dtype, etc\n A type that can be interpreted as a NumPy numeric data type, e.g.\n 'uint32', np.uint8, torch.float32, etc.\n\n Returns\n -------\n limits : tuple\n The smallest/largest numbers expressible by the type.\n \"\"\"\n dtype = normalize_dtype(dtype_spec)\n info: Union[np.iinfo, np.finfo]\n if np.issubdtype(dtype, np.integer):\n info = np.iinfo(dtype)\n elif dtype and np.issubdtype(dtype, np.floating):\n info = np.finfo(dtype)\n else:\n raise TypeError(f'Unrecognized or non-numeric dtype: {dtype_spec}')\n return info.min, info.max\n", "path": "napari/utils/_dtype.py"}]} | 1,527 | 162 |
gh_patches_debug_15002 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MetricTracker of a single metric should not accept a list of `maximize`
## 🐛 Bug
It can be confusing that MetricTracker can be instantiated with a single metric and still accept a list of booleans as `maximize`. Further, when calling `tracker.best_metric`, the metric will always be maximized when `maximize` is a list and `metric` is a single Metric (even if `maximize` is `[False]`), which is clearly not the expected behavior. The reason for this comes from these two lines in `MetricTracker`'s `best_metric` method:
```py
if isinstance(self._base_metric, Metric):
fn = torch.max if self.maximize else torch.min
```
Here, if `self.maximize` is any list, the condition will be True.
Raising an error at initialization in such a scenario would be safer.
### To Reproduce
Initialize a `MetricTracker` with a single metric as `metric` and a list of booleans as `maximize`.
#### Code sample
```py
>>> import torch
>>> from torchmetrics import MetricTracker, MeanSquaredError
>>> _ = torch.manual_seed(42)
>>> tracker = MetricTracker(MeanSquaredError(), maximize=[False])
>>> for epoch in range(5):
... tracker.increment()
... for batch_idx in range(5):
... preds, target = torch.randn(100), torch.randn(100)
... tracker.update(preds, target)
...
>>> best_acc, which_epoch = tracker.best_metric(return_step=True)
>>> print(best_acc)
2.2481114864349365
>>> print(which_epoch)
4
>>> print(tracker.compute_all())
tensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])
```
=> The metric has been maximized despite `maximize` being `[False]`
### Expected behavior
Raising a `ValueError` at the initialization of `MetricTracker`, indicating that `maximize` should be a single `bool` when the `metric` is a single `Metric`.
### Environment
- TorchMetrics version: 0.12.0dev
- Python & PyTorch Version: Python 3.10.6, torch 1.13.1+cu117
- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04
### Additional context
With the additional support of `MultioutputWrapper` that I am working on (#1409) this becomes even more confusing, because a `MultioutputWrapper` is a single `Metric` and a user could be tempted to give a list of booleans as `maximize`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/wrappers/tracker.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from copy import deepcopy
15 from typing import Any, Dict, List, Tuple, Union
16
17 import torch
18 from torch import Tensor
19 from torch.nn import ModuleList
20
21 from torchmetrics.collections import MetricCollection
22 from torchmetrics.metric import Metric
23 from torchmetrics.utilities.prints import rank_zero_warn
24
25
26 class MetricTracker(ModuleList):
27 """A wrapper class that can help keeping track of a metric or metric collection over time and implement useful
28 methods. The wrapper implements the standard ``.update()``, ``.compute()``, ``.reset()`` methods that just
29 calls corresponding method of the currently tracked metric. However, the following additional methods are
30 provided:
31
32 -``MetricTracker.n_steps``: number of metrics being tracked
33 -``MetricTracker.increment()``: initialize a new metric for being tracked
34 -``MetricTracker.compute_all()``: get the metric value for all steps
35 -``MetricTracker.best_metric()``: returns the best value
36
37 Args:
38 metric: instance of a ``torchmetrics.Metric`` or ``torchmetrics.MetricCollection``
39 to keep track of at each timestep.
40 maximize: either single bool or list of bool indicating if higher metric values are
41 better (``True``) or lower is better (``False``).
42
43 Example (single metric):
44 >>> from torchmetrics import MetricTracker
45 >>> from torchmetrics.classification import MulticlassAccuracy
46 >>> _ = torch.manual_seed(42)
47 >>> tracker = MetricTracker(MulticlassAccuracy(num_classes=10, average='micro'))
48 >>> for epoch in range(5):
49 ... tracker.increment()
50 ... for batch_idx in range(5):
51 ... preds, target = torch.randint(10, (100,)), torch.randint(10, (100,))
52 ... tracker.update(preds, target)
53 ... print(f"current acc={tracker.compute()}")
54 current acc=0.1120000034570694
55 current acc=0.08799999952316284
56 current acc=0.12600000202655792
57 current acc=0.07999999821186066
58 current acc=0.10199999809265137
59 >>> best_acc, which_epoch = tracker.best_metric(return_step=True)
60 >>> best_acc # doctest: +ELLIPSIS
61 0.1260...
62 >>> which_epoch
63 2
64 >>> tracker.compute_all()
65 tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020])
66
67 Example (multiple metrics using MetricCollection):
68 >>> from torchmetrics import MetricTracker, MetricCollection, MeanSquaredError, ExplainedVariance
69 >>> _ = torch.manual_seed(42)
70 >>> tracker = MetricTracker(MetricCollection([MeanSquaredError(), ExplainedVariance()]), maximize=[False, True])
71 >>> for epoch in range(5):
72 ... tracker.increment()
73 ... for batch_idx in range(5):
74 ... preds, target = torch.randn(100), torch.randn(100)
75 ... tracker.update(preds, target)
76 ... print(f"current stats={tracker.compute()}") # doctest: +NORMALIZE_WHITESPACE
77 current stats={'MeanSquaredError': tensor(1.8218), 'ExplainedVariance': tensor(-0.8969)}
78 current stats={'MeanSquaredError': tensor(2.0268), 'ExplainedVariance': tensor(-1.0206)}
79 current stats={'MeanSquaredError': tensor(1.9491), 'ExplainedVariance': tensor(-0.8298)}
80 current stats={'MeanSquaredError': tensor(1.9800), 'ExplainedVariance': tensor(-0.9199)}
81 current stats={'MeanSquaredError': tensor(2.2481), 'ExplainedVariance': tensor(-1.1622)}
82 >>> from pprint import pprint
83 >>> best_res, which_epoch = tracker.best_metric(return_step=True)
84 >>> pprint(best_res) # doctest: +ELLIPSIS
85 {'ExplainedVariance': -0.829...,
86 'MeanSquaredError': 1.821...}
87 >>> which_epoch
88 {'MeanSquaredError': 0, 'ExplainedVariance': 2}
89 >>> pprint(tracker.compute_all())
90 {'ExplainedVariance': tensor([-0.8969, -1.0206, -0.8298, -0.9199, -1.1622]),
91 'MeanSquaredError': tensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])}
92 """
93
94 def __init__(self, metric: Union[Metric, MetricCollection], maximize: Union[bool, List[bool]] = True) -> None:
95 super().__init__()
96 if not isinstance(metric, (Metric, MetricCollection)):
97 raise TypeError(
98 "Metric arg need to be an instance of a torchmetrics"
99 f" `Metric` or `MetricCollection` but got {metric}"
100 )
101 self._base_metric = metric
102 if not isinstance(maximize, (bool, list)):
103 raise ValueError("Argument `maximize` should either be a single bool or list of bool")
104 if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):
105 raise ValueError("The len of argument `maximize` should match the length of the metric collection")
106 self.maximize = maximize
107
108 self._increment_called = False
109
110 @property
111 def n_steps(self) -> int:
112 """Returns the number of times the tracker has been incremented."""
113 return len(self) - 1 # subtract the base metric
114
115 def increment(self) -> None:
116 """Creates a new instance of the input metric that will be updated next."""
117 self._increment_called = True
118 self.append(deepcopy(self._base_metric))
119
120 def forward(self, *args, **kwargs) -> None: # type: ignore
121 """Calls forward of the current metric being tracked."""
122 self._check_for_increment("forward")
123 return self[-1](*args, **kwargs)
124
125 def update(self, *args, **kwargs) -> None: # type: ignore
126 """Updates the current metric being tracked."""
127 self._check_for_increment("update")
128 self[-1].update(*args, **kwargs)
129
130 def compute(self) -> Any:
131 """Call compute of the current metric being tracked."""
132 self._check_for_increment("compute")
133 return self[-1].compute()
134
135 def compute_all(self) -> Tensor:
136 """Compute the metric value for all tracked metrics."""
137 self._check_for_increment("compute_all")
138 # The i!=0 accounts for the self._base_metric should be ignored
139 res = [metric.compute() for i, metric in enumerate(self) if i != 0]
140 if isinstance(self._base_metric, MetricCollection):
141 keys = res[0].keys()
142 return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}
143 return torch.stack(res, dim=0)
144
145 def reset(self) -> None:
146 """Resets the current metric being tracked."""
147 self[-1].reset()
148
149 def reset_all(self) -> None:
150 """Resets all metrics being tracked."""
151 for metric in self:
152 metric.reset()
153
154 def best_metric(
155 self, return_step: bool = False
156 ) -> Union[
157 None,
158 float,
159 Tuple[int, float],
160 Tuple[None, None],
161 Dict[str, Union[float, None]],
162 Tuple[Dict[str, Union[int, None]], Dict[str, Union[float, None]]],
163 ]:
164 """Returns the highest metric out of all tracked.
165
166 Args:
167 return_step: If ``True`` will also return the step with the highest metric value.
168
169 Returns:
170 The best metric value, and optionally the time-step.
171 """
172 if isinstance(self._base_metric, Metric):
173 fn = torch.max if self.maximize else torch.min
174 try:
175 value, idx = fn(self.compute_all(), 0)
176 if return_step:
177 return value.item(), idx.item()
178 return value.item()
179 except ValueError as error:
180 rank_zero_warn(
181 f"Encountered the following error when trying to get the best metric: {error}"
182 "this is probably due to the 'best' not being defined for this metric."
183 "Returning `None` instead.",
184 UserWarning,
185 )
186 if return_step:
187 return None, None
188 return None
189
190 else: # this is a metric collection
191 res = self.compute_all()
192 maximize = self.maximize if isinstance(self.maximize, list) else len(res) * [self.maximize]
193 value, idx = {}, {}
194 for i, (k, v) in enumerate(res.items()):
195 try:
196 fn = torch.max if maximize[i] else torch.min
197 out = fn(v, 0)
198 value[k], idx[k] = out[0].item(), out[1].item()
199 except ValueError as error:
200 rank_zero_warn(
201 f"Encountered the following error when trying to get the best metric for metric {k}:"
202 f"{error} this is probably due to the 'best' not being defined for this metric."
203 "Returning `None` instead.",
204 UserWarning,
205 )
206 value[k], idx[k] = None, None
207
208 if return_step:
209 return value, idx
210 return value
211
212 def _check_for_increment(self, method: str) -> None:
213 if not self._increment_called:
214 raise ValueError(f"`{method}` cannot be called before `.increment()` has been called")
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/wrappers/tracker.py b/src/torchmetrics/wrappers/tracker.py
--- a/src/torchmetrics/wrappers/tracker.py
+++ b/src/torchmetrics/wrappers/tracker.py
@@ -103,6 +103,8 @@
raise ValueError("Argument `maximize` should either be a single bool or list of bool")
if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):
raise ValueError("The len of argument `maximize` should match the length of the metric collection")
+ if isinstance(metric, Metric) and not isinstance(maximize, bool):
+ raise ValueError("Argument `maximize` should be a single bool when `metric` is a single Metric")
self.maximize = maximize
self._increment_called = False
| {"golden_diff": "diff --git a/src/torchmetrics/wrappers/tracker.py b/src/torchmetrics/wrappers/tracker.py\n--- a/src/torchmetrics/wrappers/tracker.py\n+++ b/src/torchmetrics/wrappers/tracker.py\n@@ -103,6 +103,8 @@\n raise ValueError(\"Argument `maximize` should either be a single bool or list of bool\")\n if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):\n raise ValueError(\"The len of argument `maximize` should match the length of the metric collection\")\n+ if isinstance(metric, Metric) and not isinstance(maximize, bool):\n+ raise ValueError(\"Argument `maximize` should be a single bool when `metric` is a single Metric\")\n self.maximize = maximize\n \n self._increment_called = False\n", "issue": "MetricTracker of a single metric should not accept a list of `maximize`\n## \ud83d\udc1b Bug\r\n\r\nIt can be confusing that MetricTracker can be instantiated with a single metric and still accept a list of booleans as `maximize`. Further, when calling `tracker.best_metric`, the metric will always be maximized when `maximize` is a list and `metric` is a single Metric (even if `maximize` is `[False]`), which is clearly not an expected behavior (the reason for this comes from those two lines in `MetricTracker`'s `best_metric` method:\r\n```py\r\nif isinstance(self._base_metric, Metric):\r\n fn = torch.max if self.maximize else torch.min\r\n```\r\nHere, if `self.maximize` is any list, the condition will be True.\r\nRaising an error at initialization in such a scenario would be safer.\r\n\r\n### To Reproduce\r\n\r\nInitialize a `MetricTracker` with a single metric as `metric` and a list of booleans as `maximize`.\r\n\r\n#### Code sample\r\n\r\n```py\r\n>>> import torch\r\n>>> from torchmetrics import MetricTracker, MeanSquaredError\r\n>>> _ = torch.manual_seed(42)\r\n>>> tracker = MetricTracker(MeanSquaredError(), maximize=[False])\r\n>>> for epoch in range(5):\r\n... tracker.increment()\r\n... for batch_idx in range(5):\r\n... preds, target = torch.randn(100), torch.randn(100)\r\n... tracker.update(preds, target)\r\n... 
\r\n>>> best_acc, which_epoch = tracker.best_metric(return_step=True)\r\n>>> print(best_acc)\r\n2.2481114864349365\r\n>>> print(which_epoch)\r\n4\r\n>>> print(tracker.compute_all())\r\ntensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])\r\n```\r\n=> The metric has been maximized despite `maximize` being `[False]`\r\n\r\n### Expected behavior\r\n\r\nRaising a `ValueError` at the initialization of `MetricTracker`, indicating that `maximize` should be a single `bool` when the `metric` is a single `Metric`.\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 0.12.0dev\r\n- Python & PyTorch Version: Python 3.10.6, torch 1.13.1+cu117\r\n- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04\r\n\r\n### Additional context\r\n\r\nWith the additional support of `MultioutputWrapper` that I am working on (#1409) this becomes even more confusing, because a `MultioutputWrapper` is a single `Metric` and a user could be tempted to give a list of booleans as `maximize`.\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics.collections import MetricCollection\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.prints import rank_zero_warn\n\n\nclass MetricTracker(ModuleList):\n \"\"\"A wrapper class that can help keeping track of a metric or metric collection over time and implement useful\n methods. The wrapper implements the standard ``.update()``, ``.compute()``, ``.reset()`` methods that just\n calls corresponding method of the currently tracked metric. However, the following additional methods are\n provided:\n\n -``MetricTracker.n_steps``: number of metrics being tracked\n -``MetricTracker.increment()``: initialize a new metric for being tracked\n -``MetricTracker.compute_all()``: get the metric value for all steps\n -``MetricTracker.best_metric()``: returns the best value\n\n Args:\n metric: instance of a ``torchmetrics.Metric`` or ``torchmetrics.MetricCollection``\n to keep track of at each timestep.\n maximize: either single bool or list of bool indicating if higher metric values are\n better (``True``) or lower is better (``False``).\n\n Example (single metric):\n >>> from torchmetrics import MetricTracker\n >>> from torchmetrics.classification import MulticlassAccuracy\n >>> _ = torch.manual_seed(42)\n >>> tracker = MetricTracker(MulticlassAccuracy(num_classes=10, average='micro'))\n >>> for epoch in range(5):\n ... tracker.increment()\n ... for batch_idx in range(5):\n ... preds, target = torch.randint(10, (100,)), torch.randint(10, (100,))\n ... tracker.update(preds, target)\n ... 
print(f\"current acc={tracker.compute()}\")\n current acc=0.1120000034570694\n current acc=0.08799999952316284\n current acc=0.12600000202655792\n current acc=0.07999999821186066\n current acc=0.10199999809265137\n >>> best_acc, which_epoch = tracker.best_metric(return_step=True)\n >>> best_acc # doctest: +ELLIPSIS\n 0.1260...\n >>> which_epoch\n 2\n >>> tracker.compute_all()\n tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020])\n\n Example (multiple metrics using MetricCollection):\n >>> from torchmetrics import MetricTracker, MetricCollection, MeanSquaredError, ExplainedVariance\n >>> _ = torch.manual_seed(42)\n >>> tracker = MetricTracker(MetricCollection([MeanSquaredError(), ExplainedVariance()]), maximize=[False, True])\n >>> for epoch in range(5):\n ... tracker.increment()\n ... for batch_idx in range(5):\n ... preds, target = torch.randn(100), torch.randn(100)\n ... tracker.update(preds, target)\n ... print(f\"current stats={tracker.compute()}\") # doctest: +NORMALIZE_WHITESPACE\n current stats={'MeanSquaredError': tensor(1.8218), 'ExplainedVariance': tensor(-0.8969)}\n current stats={'MeanSquaredError': tensor(2.0268), 'ExplainedVariance': tensor(-1.0206)}\n current stats={'MeanSquaredError': tensor(1.9491), 'ExplainedVariance': tensor(-0.8298)}\n current stats={'MeanSquaredError': tensor(1.9800), 'ExplainedVariance': tensor(-0.9199)}\n current stats={'MeanSquaredError': tensor(2.2481), 'ExplainedVariance': tensor(-1.1622)}\n >>> from pprint import pprint\n >>> best_res, which_epoch = tracker.best_metric(return_step=True)\n >>> pprint(best_res) # doctest: +ELLIPSIS\n {'ExplainedVariance': -0.829...,\n 'MeanSquaredError': 1.821...}\n >>> which_epoch\n {'MeanSquaredError': 0, 'ExplainedVariance': 2}\n >>> pprint(tracker.compute_all())\n {'ExplainedVariance': tensor([-0.8969, -1.0206, -0.8298, -0.9199, -1.1622]),\n 'MeanSquaredError': tensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])}\n \"\"\"\n\n def __init__(self, metric: Union[Metric, MetricCollection], maximize: Union[bool, List[bool]] = True) -> None:\n super().__init__()\n if not isinstance(metric, (Metric, MetricCollection)):\n raise TypeError(\n \"Metric arg need to be an instance of a torchmetrics\"\n f\" `Metric` or `MetricCollection` but got {metric}\"\n )\n self._base_metric = metric\n if not isinstance(maximize, (bool, list)):\n raise ValueError(\"Argument `maximize` should either be a single bool or list of bool\")\n if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):\n raise ValueError(\"The len of argument `maximize` should match the length of the metric collection\")\n self.maximize = maximize\n\n self._increment_called = False\n\n @property\n def n_steps(self) -> int:\n \"\"\"Returns the number of times the tracker has been incremented.\"\"\"\n return len(self) - 1 # subtract the base metric\n\n def increment(self) -> None:\n \"\"\"Creates a new instance of the input metric that will be updated next.\"\"\"\n self._increment_called = True\n self.append(deepcopy(self._base_metric))\n\n def forward(self, *args, **kwargs) -> None: # type: ignore\n \"\"\"Calls forward of the current metric being tracked.\"\"\"\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)\n\n def update(self, *args, **kwargs) -> None: # type: ignore\n \"\"\"Updates the current metric being tracked.\"\"\"\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)\n\n def compute(self) -> Any:\n \"\"\"Call compute of the current metric being tracked.\"\"\"\n 
self._check_for_increment(\"compute\")\n return self[-1].compute()\n\n def compute_all(self) -> Tensor:\n \"\"\"Compute the metric value for all tracked metrics.\"\"\"\n self._check_for_increment(\"compute_all\")\n # The i!=0 accounts for the self._base_metric should be ignored\n res = [metric.compute() for i, metric in enumerate(self) if i != 0]\n if isinstance(self._base_metric, MetricCollection):\n keys = res[0].keys()\n return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}\n return torch.stack(res, dim=0)\n\n def reset(self) -> None:\n \"\"\"Resets the current metric being tracked.\"\"\"\n self[-1].reset()\n\n def reset_all(self) -> None:\n \"\"\"Resets all metrics being tracked.\"\"\"\n for metric in self:\n metric.reset()\n\n def best_metric(\n self, return_step: bool = False\n ) -> Union[\n None,\n float,\n Tuple[int, float],\n Tuple[None, None],\n Dict[str, Union[float, None]],\n Tuple[Dict[str, Union[int, None]], Dict[str, Union[float, None]]],\n ]:\n \"\"\"Returns the highest metric out of all tracked.\n\n Args:\n return_step: If ``True`` will also return the step with the highest metric value.\n\n Returns:\n The best metric value, and optionally the time-step.\n \"\"\"\n if isinstance(self._base_metric, Metric):\n fn = torch.max if self.maximize else torch.min\n try:\n value, idx = fn(self.compute_all(), 0)\n if return_step:\n return value.item(), idx.item()\n return value.item()\n except ValueError as error:\n rank_zero_warn(\n f\"Encountered the following error when trying to get the best metric: {error}\"\n \"this is probably due to the 'best' not being defined for this metric.\"\n \"Returning `None` instead.\",\n UserWarning,\n )\n if return_step:\n return None, None\n return None\n\n else: # this is a metric collection\n res = self.compute_all()\n maximize = self.maximize if isinstance(self.maximize, list) else len(res) * [self.maximize]\n value, idx = {}, {}\n for i, (k, v) in enumerate(res.items()):\n try:\n fn = torch.max if maximize[i] else torch.min\n out = fn(v, 0)\n value[k], idx[k] = out[0].item(), out[1].item()\n except ValueError as error:\n rank_zero_warn(\n f\"Encountered the following error when trying to get the best metric for metric {k}:\"\n f\"{error} this is probably due to the 'best' not being defined for this metric.\"\n \"Returning `None` instead.\",\n UserWarning,\n )\n value[k], idx[k] = None, None\n\n if return_step:\n return value, idx\n return value\n\n def _check_for_increment(self, method: str) -> None:\n if not self._increment_called:\n raise ValueError(f\"`{method}` cannot be called before `.increment()` has been called\")\n", "path": "src/torchmetrics/wrappers/tracker.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics.collections import MetricCollection\nfrom torchmetrics.metric import 
Metric\nfrom torchmetrics.utilities.prints import rank_zero_warn\n\n\nclass MetricTracker(ModuleList):\n \"\"\"A wrapper class that can help keeping track of a metric or metric collection over time and implement useful\n methods. The wrapper implements the standard ``.update()``, ``.compute()``, ``.reset()`` methods that just\n calls corresponding method of the currently tracked metric. However, the following additional methods are\n provided:\n\n -``MetricTracker.n_steps``: number of metrics being tracked\n -``MetricTracker.increment()``: initialize a new metric for being tracked\n -``MetricTracker.compute_all()``: get the metric value for all steps\n -``MetricTracker.best_metric()``: returns the best value\n\n Args:\n metric: instance of a ``torchmetrics.Metric`` or ``torchmetrics.MetricCollection``\n to keep track of at each timestep.\n maximize: either single bool or list of bool indicating if higher metric values are\n better (``True``) or lower is better (``False``).\n\n Example (single metric):\n >>> from torchmetrics import MetricTracker\n >>> from torchmetrics.classification import MulticlassAccuracy\n >>> _ = torch.manual_seed(42)\n >>> tracker = MetricTracker(MulticlassAccuracy(num_classes=10, average='micro'))\n >>> for epoch in range(5):\n ... tracker.increment()\n ... for batch_idx in range(5):\n ... preds, target = torch.randint(10, (100,)), torch.randint(10, (100,))\n ... tracker.update(preds, target)\n ... print(f\"current acc={tracker.compute()}\")\n current acc=0.1120000034570694\n current acc=0.08799999952316284\n current acc=0.12600000202655792\n current acc=0.07999999821186066\n current acc=0.10199999809265137\n >>> best_acc, which_epoch = tracker.best_metric(return_step=True)\n >>> best_acc # doctest: +ELLIPSIS\n 0.1260...\n >>> which_epoch\n 2\n >>> tracker.compute_all()\n tensor([0.1120, 0.0880, 0.1260, 0.0800, 0.1020])\n\n Example (multiple metrics using MetricCollection):\n >>> from torchmetrics import MetricTracker, MetricCollection, MeanSquaredError, ExplainedVariance\n >>> _ = torch.manual_seed(42)\n >>> tracker = MetricTracker(MetricCollection([MeanSquaredError(), ExplainedVariance()]), maximize=[False, True])\n >>> for epoch in range(5):\n ... tracker.increment()\n ... for batch_idx in range(5):\n ... preds, target = torch.randn(100), torch.randn(100)\n ... tracker.update(preds, target)\n ... 
print(f\"current stats={tracker.compute()}\") # doctest: +NORMALIZE_WHITESPACE\n current stats={'MeanSquaredError': tensor(1.8218), 'ExplainedVariance': tensor(-0.8969)}\n current stats={'MeanSquaredError': tensor(2.0268), 'ExplainedVariance': tensor(-1.0206)}\n current stats={'MeanSquaredError': tensor(1.9491), 'ExplainedVariance': tensor(-0.8298)}\n current stats={'MeanSquaredError': tensor(1.9800), 'ExplainedVariance': tensor(-0.9199)}\n current stats={'MeanSquaredError': tensor(2.2481), 'ExplainedVariance': tensor(-1.1622)}\n >>> from pprint import pprint\n >>> best_res, which_epoch = tracker.best_metric(return_step=True)\n >>> pprint(best_res) # doctest: +ELLIPSIS\n {'ExplainedVariance': -0.829...,\n 'MeanSquaredError': 1.821...}\n >>> which_epoch\n {'MeanSquaredError': 0, 'ExplainedVariance': 2}\n >>> pprint(tracker.compute_all())\n {'ExplainedVariance': tensor([-0.8969, -1.0206, -0.8298, -0.9199, -1.1622]),\n 'MeanSquaredError': tensor([1.8218, 2.0268, 1.9491, 1.9800, 2.2481])}\n \"\"\"\n\n def __init__(self, metric: Union[Metric, MetricCollection], maximize: Union[bool, List[bool]] = True) -> None:\n super().__init__()\n if not isinstance(metric, (Metric, MetricCollection)):\n raise TypeError(\n \"Metric arg need to be an instance of a torchmetrics\"\n f\" `Metric` or `MetricCollection` but got {metric}\"\n )\n self._base_metric = metric\n if not isinstance(maximize, (bool, list)):\n raise ValueError(\"Argument `maximize` should either be a single bool or list of bool\")\n if isinstance(maximize, list) and isinstance(metric, MetricCollection) and len(maximize) != len(metric):\n raise ValueError(\"The len of argument `maximize` should match the length of the metric collection\")\n if isinstance(metric, Metric) and not isinstance(maximize, bool):\n raise ValueError(\"Argument `maximize` should be a single bool when `metric` is a single Metric\")\n self.maximize = maximize\n\n self._increment_called = False\n\n @property\n def n_steps(self) -> int:\n \"\"\"Returns the number of times the tracker has been incremented.\"\"\"\n return len(self) - 1 # subtract the base metric\n\n def increment(self) -> None:\n \"\"\"Creates a new instance of the input metric that will be updated next.\"\"\"\n self._increment_called = True\n self.append(deepcopy(self._base_metric))\n\n def forward(self, *args, **kwargs) -> None: # type: ignore\n \"\"\"Calls forward of the current metric being tracked.\"\"\"\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)\n\n def update(self, *args, **kwargs) -> None: # type: ignore\n \"\"\"Updates the current metric being tracked.\"\"\"\n self._check_for_increment(\"update\")\n self[-1].update(*args, **kwargs)\n\n def compute(self) -> Any:\n \"\"\"Call compute of the current metric being tracked.\"\"\"\n self._check_for_increment(\"compute\")\n return self[-1].compute()\n\n def compute_all(self) -> Tensor:\n \"\"\"Compute the metric value for all tracked metrics.\"\"\"\n self._check_for_increment(\"compute_all\")\n # The i!=0 accounts for the self._base_metric should be ignored\n res = [metric.compute() for i, metric in enumerate(self) if i != 0]\n if isinstance(self._base_metric, MetricCollection):\n keys = res[0].keys()\n return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}\n return torch.stack(res, dim=0)\n\n def reset(self) -> None:\n \"\"\"Resets the current metric being tracked.\"\"\"\n self[-1].reset()\n\n def reset_all(self) -> None:\n \"\"\"Resets all metrics being tracked.\"\"\"\n for metric in self:\n metric.reset()\n\n 
def best_metric(\n self, return_step: bool = False\n ) -> Union[\n None,\n float,\n Tuple[int, float],\n Tuple[None, None],\n Dict[str, Union[float, None]],\n Tuple[Dict[str, Union[int, None]], Dict[str, Union[float, None]]],\n ]:\n \"\"\"Returns the highest metric out of all tracked.\n\n Args:\n return_step: If ``True`` will also return the step with the highest metric value.\n\n Returns:\n The best metric value, and optionally the time-step.\n \"\"\"\n if isinstance(self._base_metric, Metric):\n fn = torch.max if self.maximize else torch.min\n try:\n value, idx = fn(self.compute_all(), 0)\n if return_step:\n return value.item(), idx.item()\n return value.item()\n except ValueError as error:\n rank_zero_warn(\n f\"Encountered the following error when trying to get the best metric: {error}\"\n \"this is probably due to the 'best' not being defined for this metric.\"\n \"Returning `None` instead.\",\n UserWarning,\n )\n if return_step:\n return None, None\n return None\n\n else: # this is a metric collection\n res = self.compute_all()\n maximize = self.maximize if isinstance(self.maximize, list) else len(res) * [self.maximize]\n value, idx = {}, {}\n for i, (k, v) in enumerate(res.items()):\n try:\n fn = torch.max if maximize[i] else torch.min\n out = fn(v, 0)\n value[k], idx[k] = out[0].item(), out[1].item()\n except ValueError as error:\n rank_zero_warn(\n f\"Encountered the following error when trying to get the best metric for metric {k}:\"\n f\"{error} this is probably due to the 'best' not being defined for this metric.\"\n \"Returning `None` instead.\",\n UserWarning,\n )\n value[k], idx[k] = None, None\n\n if return_step:\n return value, idx\n return value\n\n def _check_for_increment(self, method: str) -> None:\n if not self._increment_called:\n raise ValueError(f\"`{method}` cannot be called before `.increment()` has been called\")\n", "path": "src/torchmetrics/wrappers/tracker.py"}]} | 3,763 | 186 |
gh_patches_debug_3818 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2384 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ruby hooks cannot be installed when `gem` is globally configured with `--user-install`
### search tried in the issue tracker
gem, ruby
### describe your issue
On Arch Linux, `gem` is configured to always use `--user-install` via a global option set in `/etc/gemrc`: https://github.com/archlinux/svntogit-packages/blob/7a52ba210a99eee8df25ad27aa5700bc1f3f10d0/trunk/gemrc
This breaks Ruby tool installations for `pre-commit`, because it makes `gem` ignore `GEM_HOME`.
In a fresh `git` repo:
```console
$ ls -A
.git .pre-commit-config.yaml t.rb
$ file t.rb
t.rb: empty
$ pre-commit clean
[...]
$ pre-commit run rubocop -a
[INFO] Initializing environment for https://github.com/rubocop/rubocop.
[INFO] Installing environment for https://github.com/rubocop/rubocop.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
rubocop..................................................................Failed
- hook id: rubocop
- exit code: 1
Executable `rubocop` not found
$
```
This can also be reproduced in a fresh docker container using `archlinux:latest` and installing `python-virtualenv python-pip ruby git`.
Adding `--norc` (or `--no-user-install`) to the `gem install` invocation here https://github.com/pre-commit/pre-commit/blob/cc9d950601cd3eba27e8395a7edcd455262705d9/pre_commit/languages/ruby.py#L136-L143 fixes the issue for me (though I haven’t written any tests).
### pre-commit --version
pre-commit 2.19.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/rubocop/rubocop
rev: v1.29.0
hooks:
- id: rubocop
```
### ~/.cache/pre-commit/pre-commit.log (if present)
not present
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/ruby.py`
Content:
```
1 from __future__ import annotations
2
3 import contextlib
4 import functools
5 import os.path
6 import shutil
7 import tarfile
8 from typing import Generator
9 from typing import Sequence
10
11 import pre_commit.constants as C
12 from pre_commit.envcontext import envcontext
13 from pre_commit.envcontext import PatchesT
14 from pre_commit.envcontext import UNSET
15 from pre_commit.envcontext import Var
16 from pre_commit.hook import Hook
17 from pre_commit.languages import helpers
18 from pre_commit.prefix import Prefix
19 from pre_commit.util import CalledProcessError
20 from pre_commit.util import clean_path_on_failure
21 from pre_commit.util import resource_bytesio
22
23 ENVIRONMENT_DIR = 'rbenv'
24 health_check = helpers.basic_health_check
25
26
27 @functools.lru_cache(maxsize=1)
28 def get_default_version() -> str:
29 if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):
30 return 'system'
31 else:
32 return C.DEFAULT
33
34
35 def get_env_patch(
36 venv: str,
37 language_version: str,
38 ) -> PatchesT:
39 patches: PatchesT = (
40 ('GEM_HOME', os.path.join(venv, 'gems')),
41 ('GEM_PATH', UNSET),
42 ('BUNDLE_IGNORE_CONFIG', '1'),
43 )
44 if language_version == 'system':
45 patches += (
46 (
47 'PATH', (
48 os.path.join(venv, 'gems', 'bin'), os.pathsep,
49 Var('PATH'),
50 ),
51 ),
52 )
53 else: # pragma: win32 no cover
54 patches += (
55 ('RBENV_ROOT', venv),
56 (
57 'PATH', (
58 os.path.join(venv, 'gems', 'bin'), os.pathsep,
59 os.path.join(venv, 'shims'), os.pathsep,
60 os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),
61 ),
62 ),
63 )
64 if language_version not in {'system', 'default'}: # pragma: win32 no cover
65 patches += (('RBENV_VERSION', language_version),)
66
67 return patches
68
69
70 @contextlib.contextmanager
71 def in_env(
72 prefix: Prefix,
73 language_version: str,
74 ) -> Generator[None, None, None]:
75 envdir = prefix.path(
76 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
77 )
78 with envcontext(get_env_patch(envdir, language_version)):
79 yield
80
81
82 def _extract_resource(filename: str, dest: str) -> None:
83 with resource_bytesio(filename) as bio:
84 with tarfile.open(fileobj=bio) as tf:
85 tf.extractall(dest)
86
87
88 def _install_rbenv(
89 prefix: Prefix,
90 version: str,
91 ) -> None: # pragma: win32 no cover
92 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
93
94 _extract_resource('rbenv.tar.gz', prefix.path('.'))
95 shutil.move(prefix.path('rbenv'), prefix.path(directory))
96
97 # Only install ruby-build if the version is specified
98 if version != C.DEFAULT:
99 plugins_dir = prefix.path(directory, 'plugins')
100 _extract_resource('ruby-download.tar.gz', plugins_dir)
101 _extract_resource('ruby-build.tar.gz', plugins_dir)
102
103
104 def _install_ruby(
105 prefix: Prefix,
106 version: str,
107 ) -> None: # pragma: win32 no cover
108 try:
109 helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))
110 except CalledProcessError: # pragma: no cover (usually find with download)
111 # Failed to download from mirror for some reason, build it instead
112 helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))
113
114
115 def install_environment(
116 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
117 ) -> None:
118 additional_dependencies = tuple(additional_dependencies)
119 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
120 with clean_path_on_failure(prefix.path(directory)):
121 if version != 'system': # pragma: win32 no cover
122 _install_rbenv(prefix, version)
123 with in_env(prefix, version):
124 # Need to call this before installing so rbenv's directories
125 # are set up
126 helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
127 if version != C.DEFAULT:
128 _install_ruby(prefix, version)
129 # Need to call this after installing to set up the shims
130 helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
131
132 with in_env(prefix, version):
133 helpers.run_setup_cmd(
134 prefix, ('gem', 'build', *prefix.star('.gemspec')),
135 )
136 helpers.run_setup_cmd(
137 prefix,
138 (
139 'gem', 'install',
140 '--no-document', '--no-format-executable',
141 *prefix.star('.gem'), *additional_dependencies,
142 ),
143 )
144
145
146 def run_hook(
147 hook: Hook,
148 file_args: Sequence[str],
149 color: bool,
150 ) -> tuple[int, bytes]:
151 with in_env(hook.prefix, hook.language_version):
152 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -138,6 +138,7 @@
(
'gem', 'install',
'--no-document', '--no-format-executable',
+ '--no-user-install',
*prefix.star('.gem'), *additional_dependencies,
),
)
| {"golden_diff": "diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py\n--- a/pre_commit/languages/ruby.py\n+++ b/pre_commit/languages/ruby.py\n@@ -138,6 +138,7 @@\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n+ '--no-user-install',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n", "issue": "Ruby hooks cannot be installed when `gem` is globally configured with `--user-install`\n### search tried in the issue tracker\n\ngem, ruby\n\n### describe your issue\n\nOn Arch Linux, `gem` is configured to always use `--user-install` via a global option set in `/etc/gemrc`: https://github.com/archlinux/svntogit-packages/blob/7a52ba210a99eee8df25ad27aa5700bc1f3f10d0/trunk/gemrc\r\n\r\nThis breaks Ruby tool installations for `pre-commit`, because it makes `gem` ignore `GEM_HOME`.\r\n\r\nIn a fresh `git` repo:\r\n```console\r\n$ ls -A\r\n.git .pre-commit-config.yaml t.rb\r\n$ file t.rb\r\nt.rb: empty\r\n$ pre-commit clean\r\n[...]\r\n$ pre-commit run rubocop -a\r\n[INFO] Initializing environment for https://github.com/rubocop/rubocop.\r\n[INFO] Installing environment for https://github.com/rubocop/rubocop.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nrubocop..................................................................Failed\r\n- hook id: rubocop\r\n- exit code: 1\r\n\r\nExecutable `rubocop` not found\r\n\r\n$\r\n```\r\n\r\nThis can also be reproduced in a fresh docker container using `archlinux:latest` and installing `python-virtualenv python-pip ruby git`.\r\n\r\nAdding `--norc` (or `--no-install-user`) to the `gem install` invocation here https://github.com/pre-commit/pre-commit/blob/cc9d950601cd3eba27e8395a7edcd455262705d9/pre_commit/languages/ruby.py#L136-L143 fixes the issue for me (though I haven\u2019t written any tests).\n\n### pre-commit --version\n\npre-commit 2.19.0\n\n### .pre-commit-config.yaml\n\n```yaml\nrepos:\r\n - repo: https://github.com/rubocop/rubocop\r\n rev: v1.29.0\r\n hooks:\r\n - id: rubocop\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\nnot present\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealth_check = helpers.basic_health_check\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n 
os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 
'rbenv'\nhealth_check = helpers.basic_health_check\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n '--no-user-install',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", 
"path": "pre_commit/languages/ruby.py"}]} | 2,205 | 96 |
gh_patches_debug_40002 | rasdani/github-patches | git_diff | carpentries__amy-2211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instructor Selection: Additional filter/sort options for Admin view
On the Instructor Selection [admin view page](https://test-amy.carpentries.org/recruitment/processes/), the admin user can filter by assigned to and by status (open/closed).
We would like to see the following additional options:
* Filter by Online/in-person
* Sort by Priority ascending and descending
* Sort by date ascending and descending
* Filter by curriculum
* Filter by country
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/recruitment/filters.py`
Content:
```
1 import django_filters
2
3 from workshops.fields import ModelSelect2Widget
4 from workshops.filters import AMYFilterSet
5 from workshops.forms import SELECT2_SIDEBAR
6 from workshops.models import Person
7
8 from .models import InstructorRecruitment
9
10
11 class InstructorRecruitmentFilter(AMYFilterSet):
12 assigned_to = django_filters.ModelChoiceFilter(
13 queryset=Person.objects.all(),
14 widget=ModelSelect2Widget(data_view="admin-lookup", attrs=SELECT2_SIDEBAR),
15 )
16
17 class Meta:
18 model = InstructorRecruitment
19 fields = [
20 "assigned_to",
21 "status",
22 ]
23
```
Path: `amy/dashboard/filters.py`
Content:
```
1 from django.db.models import F, QuerySet
2 from django.forms import widgets
3 import django_filters as filters
4
5 from recruitment.models import InstructorRecruitment
6 from workshops.filters import AMYFilterSet
7
8
9 class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):
10 status = filters.ChoiceFilter(
11 choices=(
12 ("online", "Online only"),
13 ("inperson", "Inperson only"),
14 ),
15 empty_label="Any",
16 label="Online/inperson",
17 method="filter_status",
18 )
19
20 only_applied_to = filters.BooleanFilter(
21 label="Show only workshops I have applied to",
22 method="filter_application_only",
23 widget=widgets.CheckboxInput,
24 )
25
26 order_by = filters.OrderingFilter(
27 fields=("event__start",),
28 choices=(
29 ("-calculated_priority", "Priority"),
30 ("event__start", "Event start"),
31 ("-event__start", "Event start (descending)"),
32 ("proximity", "Closer to my airport"),
33 ("-proximity", "Further away from my airport"),
34 ),
35 method="filter_order_by",
36 )
37
38 class Meta:
39 model = InstructorRecruitment
40 fields = [
41 "status",
42 ]
43
44 def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:
45 """Filter recruitments based on the event (online/inperson) status."""
46 if value == "online":
47 return queryset.filter(event__tags__name="online")
48 elif value == "inperson":
49 return queryset.exclude(event__tags__name="online")
50 else:
51 return queryset
52
53 def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:
54 """Order entries by proximity to user's airport."""
55 try:
56 latitude: float = self.request.user.airport.latitude
57 except AttributeError:
58 latitude = 0.0
59
60 try:
61 longitude: float = self.request.user.airport.longitude
62 except AttributeError:
63 longitude = 0.0
64
65 # `0.0` is neutral element for this equation, so even if user doesn't have the
66 # airport specified, the sorting should still work
67 distance = (F("event__latitude") - latitude) ** 2.0 + (
68 F("event__longitude") - longitude
69 ) ** 2.0
70
71 if values == ["proximity"]:
72 return queryset.annotate(distance=distance).order_by("distance")
73 elif values == ["-proximity"]:
74 return queryset.annotate(distance=distance).order_by("-distance")
75 else:
76 return queryset.order_by(*values)
77
78 def filter_application_only(
79 self, queryset: QuerySet, name: str, value: bool
80 ) -> QuerySet:
81 if value:
82 return queryset.filter(signups__person=self.request.user)
83
84 return queryset
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/amy/dashboard/filters.py b/amy/dashboard/filters.py
--- a/amy/dashboard/filters.py
+++ b/amy/dashboard/filters.py
@@ -3,7 +3,9 @@
import django_filters as filters
from recruitment.models import InstructorRecruitment
-from workshops.filters import AMYFilterSet
+from workshops.fields import Select2MultipleWidget
+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet
+from workshops.models import Curriculum
class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):
@@ -23,6 +25,17 @@
widget=widgets.CheckboxInput,
)
+ country = AllCountriesMultipleFilter(
+ field_name="event__country", widget=Select2MultipleWidget
+ )
+
+ curricula = filters.ModelMultipleChoiceFilter(
+ field_name="event__curricula",
+ queryset=Curriculum.objects.all(),
+ label="Curriculum",
+ widget=Select2MultipleWidget(),
+ )
+
order_by = filters.OrderingFilter(
fields=("event__start",),
choices=(
diff --git a/amy/recruitment/filters.py b/amy/recruitment/filters.py
--- a/amy/recruitment/filters.py
+++ b/amy/recruitment/filters.py
@@ -1,22 +1,68 @@
-import django_filters
+from django.db.models import QuerySet
+import django_filters as filters
-from workshops.fields import ModelSelect2Widget
-from workshops.filters import AMYFilterSet
+from workshops.fields import ModelSelect2Widget, Select2MultipleWidget
+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet
from workshops.forms import SELECT2_SIDEBAR
-from workshops.models import Person
+from workshops.models import Curriculum, Person
from .models import InstructorRecruitment
class InstructorRecruitmentFilter(AMYFilterSet):
- assigned_to = django_filters.ModelChoiceFilter(
+ assigned_to = filters.ModelChoiceFilter(
queryset=Person.objects.all(),
widget=ModelSelect2Widget(data_view="admin-lookup", attrs=SELECT2_SIDEBAR),
)
+ online_inperson = filters.ChoiceFilter(
+ choices=(
+ ("online", "Online only"),
+ ("inperson", "Inperson only"),
+ ),
+ empty_label="Any",
+ label="Online/inperson",
+ method="filter_online_inperson",
+ )
+
+ country = AllCountriesMultipleFilter(
+ field_name="event__country", widget=Select2MultipleWidget
+ )
+
+ curricula = filters.ModelMultipleChoiceFilter(
+ field_name="event__curricula",
+ queryset=Curriculum.objects.all(),
+ label="Curriculum",
+ widget=Select2MultipleWidget(),
+ )
+
+ order_by = filters.OrderingFilter(
+ fields=("event__start",),
+ choices=(
+ ("-calculated_priority", "Priority"),
+ ("event__start", "Event start"),
+ ("-event__start", "Event start (descending)"),
+ ),
+ method="filter_order_by",
+ )
+
class Meta:
model = InstructorRecruitment
fields = [
"assigned_to",
"status",
]
+
+ def filter_online_inperson(
+ self, queryset: QuerySet, name: str, value: str
+ ) -> QuerySet:
+ """Filter recruitments based on the event (online/inperson) status."""
+ if value == "online":
+ return queryset.filter(event__tags__name="online")
+ elif value == "inperson":
+ return queryset.exclude(event__tags__name="online")
+ else:
+ return queryset
+
+ def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:
+ return queryset.order_by(*values)
| {"golden_diff": "diff --git a/amy/dashboard/filters.py b/amy/dashboard/filters.py\n--- a/amy/dashboard/filters.py\n+++ b/amy/dashboard/filters.py\n@@ -3,7 +3,9 @@\n import django_filters as filters\n \n from recruitment.models import InstructorRecruitment\n-from workshops.filters import AMYFilterSet\n+from workshops.fields import Select2MultipleWidget\n+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\n+from workshops.models import Curriculum\n \n \n class UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n@@ -23,6 +25,17 @@\n widget=widgets.CheckboxInput,\n )\n \n+ country = AllCountriesMultipleFilter(\n+ field_name=\"event__country\", widget=Select2MultipleWidget\n+ )\n+\n+ curricula = filters.ModelMultipleChoiceFilter(\n+ field_name=\"event__curricula\",\n+ queryset=Curriculum.objects.all(),\n+ label=\"Curriculum\",\n+ widget=Select2MultipleWidget(),\n+ )\n+\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\ndiff --git a/amy/recruitment/filters.py b/amy/recruitment/filters.py\n--- a/amy/recruitment/filters.py\n+++ b/amy/recruitment/filters.py\n@@ -1,22 +1,68 @@\n-import django_filters\n+from django.db.models import QuerySet\n+import django_filters as filters\n \n-from workshops.fields import ModelSelect2Widget\n-from workshops.filters import AMYFilterSet\n+from workshops.fields import ModelSelect2Widget, Select2MultipleWidget\n+from workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\n from workshops.forms import SELECT2_SIDEBAR\n-from workshops.models import Person\n+from workshops.models import Curriculum, Person\n \n from .models import InstructorRecruitment\n \n \n class InstructorRecruitmentFilter(AMYFilterSet):\n- assigned_to = django_filters.ModelChoiceFilter(\n+ assigned_to = filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n \n+ online_inperson = filters.ChoiceFilter(\n+ choices=(\n+ (\"online\", \"Online only\"),\n+ (\"inperson\", \"Inperson only\"),\n+ ),\n+ empty_label=\"Any\",\n+ label=\"Online/inperson\",\n+ method=\"filter_online_inperson\",\n+ )\n+\n+ country = AllCountriesMultipleFilter(\n+ field_name=\"event__country\", widget=Select2MultipleWidget\n+ )\n+\n+ curricula = filters.ModelMultipleChoiceFilter(\n+ field_name=\"event__curricula\",\n+ queryset=Curriculum.objects.all(),\n+ label=\"Curriculum\",\n+ widget=Select2MultipleWidget(),\n+ )\n+\n+ order_by = filters.OrderingFilter(\n+ fields=(\"event__start\",),\n+ choices=(\n+ (\"-calculated_priority\", \"Priority\"),\n+ (\"event__start\", \"Event start\"),\n+ (\"-event__start\", \"Event start (descending)\"),\n+ ),\n+ method=\"filter_order_by\",\n+ )\n+\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n+\n+ def filter_online_inperson(\n+ self, queryset: QuerySet, name: str, value: str\n+ ) -> QuerySet:\n+ \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n+ if value == \"online\":\n+ return queryset.filter(event__tags__name=\"online\")\n+ elif value == \"inperson\":\n+ return queryset.exclude(event__tags__name=\"online\")\n+ else:\n+ return queryset\n+\n+ def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n+ return queryset.order_by(*values)\n", "issue": "Instructor Selection: Additional filter/sort options for Admin view \nOn the Instructor Selection [admin view page](https://test-amy.carpentries.org/recruitment/processes/), the admin user can filter 
by assigned to and by status (open/closed).\r\n\r\nWe would like to see the following additional options:\r\n\r\n* Filter by Online/in-person\r\n* Sort by Priority ascending and descending\r\n* Sort by date ascending and descending \r\n* Filter by curriculum\r\n* Filter by country \r\n\n", "before_files": [{"content": "import django_filters\n\nfrom workshops.fields import ModelSelect2Widget\nfrom workshops.filters import AMYFilterSet\nfrom workshops.forms import SELECT2_SIDEBAR\nfrom workshops.models import Person\n\nfrom .models import InstructorRecruitment\n\n\nclass InstructorRecruitmentFilter(AMYFilterSet):\n assigned_to = django_filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n", "path": "amy/recruitment/filters.py"}, {"content": "from django.db.models import F, QuerySet\nfrom django.forms import widgets\nimport django_filters as filters\n\nfrom recruitment.models import InstructorRecruitment\nfrom workshops.filters import AMYFilterSet\n\n\nclass UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n status = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_status\",\n )\n\n only_applied_to = filters.BooleanFilter(\n label=\"Show only workshops I have applied to\",\n method=\"filter_application_only\",\n widget=widgets.CheckboxInput,\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n (\"proximity\", \"Closer to my airport\"),\n (\"-proximity\", \"Further away from my airport\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"status\",\n ]\n\n def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:\n \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n \"\"\"Order entries by proximity to user's airport.\"\"\"\n try:\n latitude: float = self.request.user.airport.latitude\n except AttributeError:\n latitude = 0.0\n\n try:\n longitude: float = self.request.user.airport.longitude\n except AttributeError:\n longitude = 0.0\n\n # `0.0` is neutral element for this equation, so even if user doesn't have the\n # airport specified, the sorting should still work\n distance = (F(\"event__latitude\") - latitude) ** 2.0 + (\n F(\"event__longitude\") - longitude\n ) ** 2.0\n\n if values == [\"proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"distance\")\n elif values == [\"-proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"-distance\")\n else:\n return queryset.order_by(*values)\n\n def filter_application_only(\n self, queryset: QuerySet, name: str, value: bool\n ) -> QuerySet:\n if value:\n return queryset.filter(signups__person=self.request.user)\n\n return queryset\n", "path": "amy/dashboard/filters.py"}], "after_files": [{"content": "from django.db.models import QuerySet\nimport 
django_filters as filters\n\nfrom workshops.fields import ModelSelect2Widget, Select2MultipleWidget\nfrom workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\nfrom workshops.forms import SELECT2_SIDEBAR\nfrom workshops.models import Curriculum, Person\n\nfrom .models import InstructorRecruitment\n\n\nclass InstructorRecruitmentFilter(AMYFilterSet):\n assigned_to = filters.ModelChoiceFilter(\n queryset=Person.objects.all(),\n widget=ModelSelect2Widget(data_view=\"admin-lookup\", attrs=SELECT2_SIDEBAR),\n )\n\n online_inperson = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_online_inperson\",\n )\n\n country = AllCountriesMultipleFilter(\n field_name=\"event__country\", widget=Select2MultipleWidget\n )\n\n curricula = filters.ModelMultipleChoiceFilter(\n field_name=\"event__curricula\",\n queryset=Curriculum.objects.all(),\n label=\"Curriculum\",\n widget=Select2MultipleWidget(),\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"assigned_to\",\n \"status\",\n ]\n\n def filter_online_inperson(\n self, queryset: QuerySet, name: str, value: str\n ) -> QuerySet:\n \"\"\"Filter recruitments based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n return queryset.order_by(*values)\n", "path": "amy/recruitment/filters.py"}, {"content": "from django.db.models import F, QuerySet\nfrom django.forms import widgets\nimport django_filters as filters\n\nfrom recruitment.models import InstructorRecruitment\nfrom workshops.fields import Select2MultipleWidget\nfrom workshops.filters import AllCountriesMultipleFilter, AMYFilterSet\nfrom workshops.models import Curriculum\n\n\nclass UpcomingTeachingOpportunitiesFilter(AMYFilterSet):\n status = filters.ChoiceFilter(\n choices=(\n (\"online\", \"Online only\"),\n (\"inperson\", \"Inperson only\"),\n ),\n empty_label=\"Any\",\n label=\"Online/inperson\",\n method=\"filter_status\",\n )\n\n only_applied_to = filters.BooleanFilter(\n label=\"Show only workshops I have applied to\",\n method=\"filter_application_only\",\n widget=widgets.CheckboxInput,\n )\n\n country = AllCountriesMultipleFilter(\n field_name=\"event__country\", widget=Select2MultipleWidget\n )\n\n curricula = filters.ModelMultipleChoiceFilter(\n field_name=\"event__curricula\",\n queryset=Curriculum.objects.all(),\n label=\"Curriculum\",\n widget=Select2MultipleWidget(),\n )\n\n order_by = filters.OrderingFilter(\n fields=(\"event__start\",),\n choices=(\n (\"-calculated_priority\", \"Priority\"),\n (\"event__start\", \"Event start\"),\n (\"-event__start\", \"Event start (descending)\"),\n (\"proximity\", \"Closer to my airport\"),\n (\"-proximity\", \"Further away from my airport\"),\n ),\n method=\"filter_order_by\",\n )\n\n class Meta:\n model = InstructorRecruitment\n fields = [\n \"status\",\n ]\n\n def filter_status(self, queryset: QuerySet, name: str, value: str) -> QuerySet:\n \"\"\"Filter recruitments 
based on the event (online/inperson) status.\"\"\"\n if value == \"online\":\n return queryset.filter(event__tags__name=\"online\")\n elif value == \"inperson\":\n return queryset.exclude(event__tags__name=\"online\")\n else:\n return queryset\n\n def filter_order_by(self, queryset: QuerySet, name: str, values: list) -> QuerySet:\n \"\"\"Order entries by proximity to user's airport.\"\"\"\n try:\n latitude: float = self.request.user.airport.latitude\n except AttributeError:\n latitude = 0.0\n\n try:\n longitude: float = self.request.user.airport.longitude\n except AttributeError:\n longitude = 0.0\n\n # `0.0` is neutral element for this equation, so even if user doesn't have the\n # airport specified, the sorting should still work\n distance = (F(\"event__latitude\") - latitude) ** 2.0 + (\n F(\"event__longitude\") - longitude\n ) ** 2.0\n\n if values == [\"proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"distance\")\n elif values == [\"-proximity\"]:\n return queryset.annotate(distance=distance).order_by(\"-distance\")\n else:\n return queryset.order_by(*values)\n\n def filter_application_only(\n self, queryset: QuerySet, name: str, value: bool\n ) -> QuerySet:\n if value:\n return queryset.filter(signups__person=self.request.user)\n\n return queryset\n", "path": "amy/dashboard/filters.py"}]} | 1,313 | 837 |
gh_patches_debug_27330 | rasdani/github-patches | git_diff | PaddlePaddle__models-2986 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SSD Inconsistent function call
In your `eval_coco_map.py` file, you imported one function [`mobile_net`](https://github.com/PaddlePaddle/models/blob/8d7328017dc50c4b82d25733cc7a4cd14ad4469f/PaddleCV/ssd/eval_coco_map.py#L10) that does not exist in the `mobilenet_ssd.py` file. And the function call [below](https://github.com/PaddlePaddle/models/blob/8d7328017dc50c4b82d25733cc7a4cd14ad4469f/PaddleCV/ssd/eval_coco_map.py#L52) should be modified too.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PaddleCV/ssd/eval_coco_map.py`
Content:
```
1 import os
2 import time
3 import numpy as np
4 import argparse
5 import functools
6
7 import paddle
8 import paddle.fluid as fluid
9 import reader
10 from mobilenet_ssd import mobile_net
11 from utility import add_arguments, print_arguments
12
13 # A special mAP metric for COCO dataset, which averages AP in different IoUs.
14 # To use this eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.
15 import json
16 from pycocotools.coco import COCO
17 from pycocotools.cocoeval import COCOeval
18
19 parser = argparse.ArgumentParser(description=__doc__)
20 add_arg = functools.partial(add_arguments, argparser=parser)
21 # yapf: disable
22 add_arg('dataset', str, 'coco2014', "coco2014, coco2017.")
23 add_arg('batch_size', int, 32, "Minibatch size.")
24 add_arg('use_gpu', bool, True, "Whether use GPU.")
25 add_arg('data_dir', str, '', "The data root path.")
26 add_arg('test_list', str, '', "The testing data lists.")
27 add_arg('model_dir', str, '', "The model path.")
28 add_arg('nms_threshold', float, 0.5, "NMS threshold.")
29 add_arg('ap_version', str, 'cocoMAP', "cocoMAP.")
30 add_arg('resize_h', int, 300, "The resized image height.")
31 add_arg('resize_w', int, 300, "The resized image height.")
32 add_arg('mean_value_B', float, 127.5, "Mean value for B channel which will be subtracted.") #123.68
33 add_arg('mean_value_G', float, 127.5, "Mean value for G channel which will be subtracted.") #116.78
34 add_arg('mean_value_R', float, 127.5, "Mean value for R channel which will be subtracted.") #103.94
35 # yapf: enable
36
37
38 def eval(args, data_args, test_list, batch_size, model_dir=None):
39 image_shape = [3, data_args.resize_h, data_args.resize_w]
40 num_classes = 91
41
42 image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
43 gt_box = fluid.layers.data(
44 name='gt_box', shape=[4], dtype='float32', lod_level=1)
45 gt_label = fluid.layers.data(
46 name='gt_label', shape=[1], dtype='int32', lod_level=1)
47 gt_iscrowd = fluid.layers.data(
48 name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)
49 gt_image_info = fluid.layers.data(
50 name='gt_image_id', shape=[3], dtype='int32')
51
52 locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
53 nmsed_out = fluid.layers.detection_output(
54 locs, confs, box, box_var, nms_threshold=args.nms_threshold)
55 loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
56 loss = fluid.layers.reduce_sum(loss)
57
58 place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
59 exe = fluid.Executor(place)
60 exe.run(fluid.default_startup_program())
61 # yapf: disable
62 if model_dir:
63 def if_exist(var):
64 return os.path.exists(os.path.join(model_dir, var.name))
65 fluid.io.load_vars(exe, model_dir, predicate=if_exist)
66 # yapf: enable
67 test_reader = reader.test(data_args, test_list, batch_size)
68 feeder = fluid.DataFeeder(
69 place=place,
70 feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])
71
72 def get_dt_res(nmsed_out_v, data):
73 dts_res = []
74 lod = nmsed_out_v[0].lod()[0]
75 nmsed_out_v = np.array(nmsed_out_v[0])
76 real_batch_size = min(batch_size, len(data))
77 assert (len(lod) == real_batch_size + 1), \
78 "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
79 k = 0
80 for i in range(real_batch_size):
81 dt_num_this_img = lod[i + 1] - lod[i]
82 image_id = int(data[i][4][0])
83 image_width = int(data[i][4][1])
84 image_height = int(data[i][4][2])
85 for j in range(dt_num_this_img):
86 dt = nmsed_out_v[k]
87 k = k + 1
88 category_id, score, xmin, ymin, xmax, ymax = dt.tolist()
89 xmin = max(min(xmin, 1.0), 0.0) * image_width
90 ymin = max(min(ymin, 1.0), 0.0) * image_height
91 xmax = max(min(xmax, 1.0), 0.0) * image_width
92 ymax = max(min(ymax, 1.0), 0.0) * image_height
93 w = xmax - xmin
94 h = ymax - ymin
95 bbox = [xmin, ymin, w, h]
96 dt_res = {
97 'image_id': image_id,
98 'category_id': category_id,
99 'bbox': bbox,
100 'score': score
101 }
102 dts_res.append(dt_res)
103 return dts_res
104
105 def test():
106 dts_res = []
107
108 for batch_id, data in enumerate(test_reader()):
109 nmsed_out_v = exe.run(fluid.default_main_program(),
110 feed=feeder.feed(data),
111 fetch_list=[nmsed_out],
112 return_numpy=False)
113 if batch_id % 20 == 0:
114 print("Batch {0}".format(batch_id))
115 dts_res += get_dt_res(nmsed_out_v, data)
116
117 with open("detection_result.json", 'w') as outfile:
118 json.dump(dts_res, outfile)
119 print("start evaluate using coco api")
120 cocoGt = COCO(os.path.join(data_args.data_dir, test_list))
121 cocoDt = cocoGt.loadRes("detection_result.json")
122 cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
123 cocoEval.evaluate()
124 cocoEval.accumulate()
125 cocoEval.summarize()
126
127 test()
128
129
130 if __name__ == '__main__':
131 args = parser.parse_args()
132 print_arguments(args)
133
134 data_dir = './data/coco'
135 if '2014' in args.dataset:
136 test_list = 'annotations/instances_val2014.json'
137 elif '2017' in args.dataset:
138 test_list = 'annotations/instances_val2017.json'
139
140 data_args = reader.Settings(
141 dataset=args.dataset,
142 data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,
143 label_file='',
144 resize_h=args.resize_h,
145 resize_w=args.resize_w,
146 mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],
147 apply_distort=False,
148 apply_expand=False,
149 ap_version=args.ap_version)
150 eval(
151 args,
152 data_args=data_args,
153 test_list=args.test_list if len(args.test_list) > 0 else test_list,
154 batch_size=args.batch_size,
155 model_dir=args.model_dir)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PaddleCV/ssd/eval_coco_map.py b/PaddleCV/ssd/eval_coco_map.py
--- a/PaddleCV/ssd/eval_coco_map.py
+++ b/PaddleCV/ssd/eval_coco_map.py
@@ -7,7 +7,7 @@
import paddle
import paddle.fluid as fluid
import reader
-from mobilenet_ssd import mobile_net
+from mobilenet_ssd import build_mobilenet_ssd
from utility import add_arguments, print_arguments
# A special mAP metric for COCO dataset, which averages AP in different IoUs.
@@ -49,7 +49,8 @@
gt_image_info = fluid.layers.data(
name='gt_image_id', shape=[3], dtype='int32')
- locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
+ locs, confs, box, box_var = build_mobilenet_ssd(image,
+ num_classes, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=args.nms_threshold)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
@@ -130,7 +131,7 @@
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
-
+ assert args.dataset in ['coco2014', 'coco2017']
data_dir = './data/coco'
if '2014' in args.dataset:
test_list = 'annotations/instances_val2014.json'
| {"golden_diff": "diff --git a/PaddleCV/ssd/eval_coco_map.py b/PaddleCV/ssd/eval_coco_map.py\n--- a/PaddleCV/ssd/eval_coco_map.py\n+++ b/PaddleCV/ssd/eval_coco_map.py\n@@ -7,7 +7,7 @@\n import paddle\n import paddle.fluid as fluid\n import reader\n-from mobilenet_ssd import mobile_net\n+from mobilenet_ssd import build_mobilenet_ssd\n from utility import add_arguments, print_arguments\n \n # A special mAP metric for COCO dataset, which averages AP in different IoUs.\n@@ -49,7 +49,8 @@\n gt_image_info = fluid.layers.data(\n name='gt_image_id', shape=[3], dtype='int32')\n \n- locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)\n+ locs, confs, box, box_var = build_mobilenet_ssd(image,\n+ num_classes, image_shape)\n nmsed_out = fluid.layers.detection_output(\n locs, confs, box, box_var, nms_threshold=args.nms_threshold)\n loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)\n@@ -130,7 +131,7 @@\n if __name__ == '__main__':\n args = parser.parse_args()\n print_arguments(args)\n-\n+ assert args.dataset in ['coco2014', 'coco2017']\n data_dir = './data/coco'\n if '2014' in args.dataset:\n test_list = 'annotations/instances_val2014.json'\n", "issue": "SSD Inconsistent function call \nIn your `eval_coco_map.py` file, you imported one function [`mobile_net `](https://github.com/PaddlePaddle/models/blob/8d7328017dc50c4b82d25733cc7a4cd14ad4469f/PaddleCV/ssd/eval_coco_map.py#L10) that do not exist in the `mobilenet_ssd.py` file. And the function call [below](https://github.com/PaddlePaddle/models/blob/8d7328017dc50c4b82d25733cc7a4cd14ad4469f/PaddleCV/ssd/eval_coco_map.py#L52) should be modified too.\n", "before_files": [{"content": "import os\nimport time\nimport numpy as np\nimport argparse\nimport functools\n\nimport paddle\nimport paddle.fluid as fluid\nimport reader\nfrom mobilenet_ssd import mobile_net\nfrom utility import add_arguments, print_arguments\n\n# A special mAP metric for COCO dataset, which averages AP in different IoUs.\n# To use this eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.\nimport json\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('dataset', str, 'coco2014', \"coco2014, coco2017.\")\nadd_arg('batch_size', int, 32, \"Minibatch size.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU.\")\nadd_arg('data_dir', str, '', \"The data root path.\")\nadd_arg('test_list', str, '', \"The testing data lists.\")\nadd_arg('model_dir', str, '', \"The model path.\")\nadd_arg('nms_threshold', float, 0.5, \"NMS threshold.\")\nadd_arg('ap_version', str, 'cocoMAP', \"cocoMAP.\")\nadd_arg('resize_h', int, 300, \"The resized image height.\")\nadd_arg('resize_w', int, 300, \"The resized image height.\")\nadd_arg('mean_value_B', float, 127.5, \"Mean value for B channel which will be subtracted.\") #123.68\nadd_arg('mean_value_G', float, 127.5, \"Mean value for G channel which will be subtracted.\") #116.78\nadd_arg('mean_value_R', float, 127.5, \"Mean value for R channel which will be subtracted.\") #103.94\n# yapf: enable\n\n\ndef eval(args, data_args, test_list, batch_size, model_dir=None):\n image_shape = [3, data_args.resize_h, data_args.resize_w]\n num_classes = 91\n\n image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')\n gt_box = fluid.layers.data(\n name='gt_box', shape=[4], dtype='float32', lod_level=1)\n gt_label = 
fluid.layers.data(\n name='gt_label', shape=[1], dtype='int32', lod_level=1)\n gt_iscrowd = fluid.layers.data(\n name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)\n gt_image_info = fluid.layers.data(\n name='gt_image_id', shape=[3], dtype='int32')\n\n locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)\n nmsed_out = fluid.layers.detection_output(\n locs, confs, box, box_var, nms_threshold=args.nms_threshold)\n loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)\n loss = fluid.layers.reduce_sum(loss)\n\n place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n # yapf: disable\n if model_dir:\n def if_exist(var):\n return os.path.exists(os.path.join(model_dir, var.name))\n fluid.io.load_vars(exe, model_dir, predicate=if_exist)\n # yapf: enable\n test_reader = reader.test(data_args, test_list, batch_size)\n feeder = fluid.DataFeeder(\n place=place,\n feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])\n\n def get_dt_res(nmsed_out_v, data):\n dts_res = []\n lod = nmsed_out_v[0].lod()[0]\n nmsed_out_v = np.array(nmsed_out_v[0])\n real_batch_size = min(batch_size, len(data))\n assert (len(lod) == real_batch_size + 1), \\\n \"Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})\".format(len(lod), batch_size)\n k = 0\n for i in range(real_batch_size):\n dt_num_this_img = lod[i + 1] - lod[i]\n image_id = int(data[i][4][0])\n image_width = int(data[i][4][1])\n image_height = int(data[i][4][2])\n for j in range(dt_num_this_img):\n dt = nmsed_out_v[k]\n k = k + 1\n category_id, score, xmin, ymin, xmax, ymax = dt.tolist()\n xmin = max(min(xmin, 1.0), 0.0) * image_width\n ymin = max(min(ymin, 1.0), 0.0) * image_height\n xmax = max(min(xmax, 1.0), 0.0) * image_width\n ymax = max(min(ymax, 1.0), 0.0) * image_height\n w = xmax - xmin\n h = ymax - ymin\n bbox = [xmin, ymin, w, h]\n dt_res = {\n 'image_id': image_id,\n 'category_id': category_id,\n 'bbox': bbox,\n 'score': score\n }\n dts_res.append(dt_res)\n return dts_res\n\n def test():\n dts_res = []\n\n for batch_id, data in enumerate(test_reader()):\n nmsed_out_v = exe.run(fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[nmsed_out],\n return_numpy=False)\n if batch_id % 20 == 0:\n print(\"Batch {0}\".format(batch_id))\n dts_res += get_dt_res(nmsed_out_v, data)\n\n with open(\"detection_result.json\", 'w') as outfile:\n json.dump(dts_res, outfile)\n print(\"start evaluate using coco api\")\n cocoGt = COCO(os.path.join(data_args.data_dir, test_list))\n cocoDt = cocoGt.loadRes(\"detection_result.json\")\n cocoEval = COCOeval(cocoGt, cocoDt, \"bbox\")\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n test()\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n print_arguments(args)\n\n data_dir = './data/coco'\n if '2014' in args.dataset:\n test_list = 'annotations/instances_val2014.json'\n elif '2017' in args.dataset:\n test_list = 'annotations/instances_val2017.json'\n\n data_args = reader.Settings(\n dataset=args.dataset,\n data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,\n label_file='',\n resize_h=args.resize_h,\n resize_w=args.resize_w,\n mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],\n apply_distort=False,\n apply_expand=False,\n ap_version=args.ap_version)\n eval(\n args,\n data_args=data_args,\n test_list=args.test_list if len(args.test_list) > 0 else test_list,\n batch_size=args.batch_size,\n 
model_dir=args.model_dir)\n", "path": "PaddleCV/ssd/eval_coco_map.py"}], "after_files": [{"content": "import os\nimport time\nimport numpy as np\nimport argparse\nimport functools\n\nimport paddle\nimport paddle.fluid as fluid\nimport reader\nfrom mobilenet_ssd import build_mobilenet_ssd\nfrom utility import add_arguments, print_arguments\n\n# A special mAP metric for COCO dataset, which averages AP in different IoUs.\n# To use this eval_cocoMAP.py, [cocoapi](https://github.com/cocodataset/cocoapi) is needed.\nimport json\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('dataset', str, 'coco2014', \"coco2014, coco2017.\")\nadd_arg('batch_size', int, 32, \"Minibatch size.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU.\")\nadd_arg('data_dir', str, '', \"The data root path.\")\nadd_arg('test_list', str, '', \"The testing data lists.\")\nadd_arg('model_dir', str, '', \"The model path.\")\nadd_arg('nms_threshold', float, 0.5, \"NMS threshold.\")\nadd_arg('ap_version', str, 'cocoMAP', \"cocoMAP.\")\nadd_arg('resize_h', int, 300, \"The resized image height.\")\nadd_arg('resize_w', int, 300, \"The resized image height.\")\nadd_arg('mean_value_B', float, 127.5, \"Mean value for B channel which will be subtracted.\") #123.68\nadd_arg('mean_value_G', float, 127.5, \"Mean value for G channel which will be subtracted.\") #116.78\nadd_arg('mean_value_R', float, 127.5, \"Mean value for R channel which will be subtracted.\") #103.94\n# yapf: enable\n\n\ndef eval(args, data_args, test_list, batch_size, model_dir=None):\n image_shape = [3, data_args.resize_h, data_args.resize_w]\n num_classes = 91\n\n image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')\n gt_box = fluid.layers.data(\n name='gt_box', shape=[4], dtype='float32', lod_level=1)\n gt_label = fluid.layers.data(\n name='gt_label', shape=[1], dtype='int32', lod_level=1)\n gt_iscrowd = fluid.layers.data(\n name='gt_iscrowd', shape=[1], dtype='int32', lod_level=1)\n gt_image_info = fluid.layers.data(\n name='gt_image_id', shape=[3], dtype='int32')\n\n locs, confs, box, box_var = build_mobilenet_ssd(image,\n num_classes, image_shape)\n nmsed_out = fluid.layers.detection_output(\n locs, confs, box, box_var, nms_threshold=args.nms_threshold)\n loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)\n loss = fluid.layers.reduce_sum(loss)\n\n place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n # yapf: disable\n if model_dir:\n def if_exist(var):\n return os.path.exists(os.path.join(model_dir, var.name))\n fluid.io.load_vars(exe, model_dir, predicate=if_exist)\n # yapf: enable\n test_reader = reader.test(data_args, test_list, batch_size)\n feeder = fluid.DataFeeder(\n place=place,\n feed_list=[image, gt_box, gt_label, gt_iscrowd, gt_image_info])\n\n def get_dt_res(nmsed_out_v, data):\n dts_res = []\n lod = nmsed_out_v[0].lod()[0]\n nmsed_out_v = np.array(nmsed_out_v[0])\n real_batch_size = min(batch_size, len(data))\n assert (len(lod) == real_batch_size + 1), \\\n \"Error Lod Tensor offset dimension. Lod({}) vs. 
batch_size({})\".format(len(lod), batch_size)\n k = 0\n for i in range(real_batch_size):\n dt_num_this_img = lod[i + 1] - lod[i]\n image_id = int(data[i][4][0])\n image_width = int(data[i][4][1])\n image_height = int(data[i][4][2])\n for j in range(dt_num_this_img):\n dt = nmsed_out_v[k]\n k = k + 1\n category_id, score, xmin, ymin, xmax, ymax = dt.tolist()\n xmin = max(min(xmin, 1.0), 0.0) * image_width\n ymin = max(min(ymin, 1.0), 0.0) * image_height\n xmax = max(min(xmax, 1.0), 0.0) * image_width\n ymax = max(min(ymax, 1.0), 0.0) * image_height\n w = xmax - xmin\n h = ymax - ymin\n bbox = [xmin, ymin, w, h]\n dt_res = {\n 'image_id': image_id,\n 'category_id': category_id,\n 'bbox': bbox,\n 'score': score\n }\n dts_res.append(dt_res)\n return dts_res\n\n def test():\n dts_res = []\n\n for batch_id, data in enumerate(test_reader()):\n nmsed_out_v = exe.run(fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[nmsed_out],\n return_numpy=False)\n if batch_id % 20 == 0:\n print(\"Batch {0}\".format(batch_id))\n dts_res += get_dt_res(nmsed_out_v, data)\n\n with open(\"detection_result.json\", 'w') as outfile:\n json.dump(dts_res, outfile)\n print(\"start evaluate using coco api\")\n cocoGt = COCO(os.path.join(data_args.data_dir, test_list))\n cocoDt = cocoGt.loadRes(\"detection_result.json\")\n cocoEval = COCOeval(cocoGt, cocoDt, \"bbox\")\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n test()\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n print_arguments(args)\n assert args.dataset in ['coco2014', 'coco2017']\n data_dir = './data/coco'\n if '2014' in args.dataset:\n test_list = 'annotations/instances_val2014.json'\n elif '2017' in args.dataset:\n test_list = 'annotations/instances_val2017.json'\n\n data_args = reader.Settings(\n dataset=args.dataset,\n data_dir=args.data_dir if len(args.data_dir) > 0 else data_dir,\n label_file='',\n resize_h=args.resize_h,\n resize_w=args.resize_w,\n mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R],\n apply_distort=False,\n apply_expand=False,\n ap_version=args.ap_version)\n eval(\n args,\n data_args=data_args,\n test_list=args.test_list if len(args.test_list) > 0 else test_list,\n batch_size=args.batch_size,\n model_dir=args.model_dir)\n", "path": "PaddleCV/ssd/eval_coco_map.py"}]} | 2,518 | 373 |
gh_patches_debug_19806 | rasdani/github-patches | git_diff | PaddlePaddle__models-780 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add decoder init & verify output
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fluid/DeepASR/infer_by_ckpt.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import sys
6 import os
7 import numpy as np
8 import argparse
9 import time
10
11 import paddle.fluid as fluid
12 import data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm
13 import data_utils.augmentor.trans_add_delta as trans_add_delta
14 import data_utils.augmentor.trans_splice as trans_splice
15 import data_utils.async_data_reader as reader
16 from decoder.post_decode_faster import Decoder
17 from data_utils.util import lodtensor_to_ndarray
18 from model_utils.model import stacked_lstmp_model
19 from data_utils.util import split_infer_result
20
21
22 def parse_args():
23 parser = argparse.ArgumentParser("Run inference by using checkpoint.")
24 parser.add_argument(
25 '--batch_size',
26 type=int,
27 default=32,
28 help='The sequence number of a batch data. (default: %(default)d)')
29 parser.add_argument(
30 '--minimum_batch_size',
31 type=int,
32 default=1,
33 help='The minimum sequence number of a batch data. '
34 '(default: %(default)d)')
35 parser.add_argument(
36 '--frame_dim',
37 type=int,
38 default=120 * 11,
39 help='Frame dimension of feature data. (default: %(default)d)')
40 parser.add_argument(
41 '--stacked_num',
42 type=int,
43 default=5,
44 help='Number of lstmp layers to stack. (default: %(default)d)')
45 parser.add_argument(
46 '--proj_dim',
47 type=int,
48 default=512,
49 help='Project size of lstmp unit. (default: %(default)d)')
50 parser.add_argument(
51 '--hidden_dim',
52 type=int,
53 default=1024,
54 help='Hidden size of lstmp unit. (default: %(default)d)')
55 parser.add_argument(
56 '--class_num',
57 type=int,
58 default=1749,
59 help='Number of classes in label. (default: %(default)d)')
60 parser.add_argument(
61 '--learning_rate',
62 type=float,
63 default=0.00016,
64 help='Learning rate used to train. (default: %(default)f)')
65 parser.add_argument(
66 '--device',
67 type=str,
68 default='GPU',
69 choices=['CPU', 'GPU'],
70 help='The device type. (default: %(default)s)')
71 parser.add_argument(
72 '--parallel', action='store_true', help='If set, run in parallel.')
73 parser.add_argument(
74 '--mean_var',
75 type=str,
76 default='data/global_mean_var_search26kHr',
77 help="The path for feature's global mean and variance. "
78 "(default: %(default)s)")
79 parser.add_argument(
80 '--infer_feature_lst',
81 type=str,
82 default='data/infer_feature.lst',
83 help='The feature list path for inference. (default: %(default)s)')
84 parser.add_argument(
85 '--infer_label_lst',
86 type=str,
87 default='data/infer_label.lst',
88 help='The label list path for inference. (default: %(default)s)')
89 parser.add_argument(
90 '--checkpoint',
91 type=str,
92 default='./checkpoint',
93 help="The checkpoint path to init model. (default: %(default)s)")
94 parser.add_argument(
95 '--vocabulary',
96 type=str,
97 default='./decoder/graph/words.txt',
98 help="The path to vocabulary. (default: %(default)s)")
99 parser.add_argument(
100 '--graphs',
101 type=str,
102 default='./decoder/graph/TLG.fst',
103 help="The path to TLG graphs for decoding. (default: %(default)s)")
104 parser.add_argument(
105 '--log_prior',
106 type=str,
107 default="./decoder/logprior",
108 help="The log prior probs for training data. (default: %(default)s)")
109 parser.add_argument(
110 '--acoustic_scale',
111 type=float,
112 default=0.2,
113 help="Scaling factor for acoustic likelihoods. (default: %(default)f)")
114 args = parser.parse_args()
115 return args
116
117
118 def print_arguments(args):
119 print('----------- Configuration Arguments -----------')
120 for arg, value in sorted(vars(args).iteritems()):
121 print('%s: %s' % (arg, value))
122 print('------------------------------------------------')
123
124
125 def infer_from_ckpt(args):
126 """Inference by using checkpoint."""
127
128 if not os.path.exists(args.checkpoint):
129 raise IOError("Invalid checkpoint!")
130
131 prediction, avg_cost, accuracy = stacked_lstmp_model(
132 frame_dim=args.frame_dim,
133 hidden_dim=args.hidden_dim,
134 proj_dim=args.proj_dim,
135 stacked_num=args.stacked_num,
136 class_num=args.class_num,
137 parallel=args.parallel)
138
139 infer_program = fluid.default_main_program().clone()
140
141 optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
142 optimizer.minimize(avg_cost)
143
144 place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)
145 exe = fluid.Executor(place)
146 exe.run(fluid.default_startup_program())
147
148 # load checkpoint.
149 fluid.io.load_persistables(exe, args.checkpoint)
150
151 ltrans = [
152 trans_add_delta.TransAddDelta(2, 2),
153 trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
154 trans_splice.TransSplice()
155 ]
156
157 feature_t = fluid.LoDTensor()
158 label_t = fluid.LoDTensor()
159
160 # infer data reader
161 infer_data_reader = reader.AsyncDataReader(args.infer_feature_lst,
162 args.infer_label_lst)
163 infer_data_reader.set_transformers(ltrans)
164 infer_costs, infer_accs = [], []
165 for batch_id, batch_data in enumerate(
166 infer_data_reader.batch_iterator(args.batch_size,
167 args.minimum_batch_size)):
168 # load_data
169 (features, labels, lod) = batch_data
170 feature_t.set(features, place)
171 feature_t.set_lod([lod])
172 label_t.set(labels, place)
173 label_t.set_lod([lod])
174
175 results = exe.run(infer_program,
176 feed={"feature": feature_t,
177 "label": label_t},
178 fetch_list=[prediction, avg_cost, accuracy],
179 return_numpy=False)
180 infer_costs.append(lodtensor_to_ndarray(results[1])[0])
181 infer_accs.append(lodtensor_to_ndarray(results[2])[0])
182
183 probs, lod = lodtensor_to_ndarray(results[0])
184 infer_batch = split_infer_result(probs, lod)
185 for index, sample in enumerate(infer_batch):
186 key = "utter#%d" % (batch_id * args.batch_size + index)
187 print(key, ": ", decoder.decode(key, sample), "\n")
188
189 print(np.mean(infer_costs), np.mean(infer_accs))
190
191
192 if __name__ == '__main__':
193 args = parse_args()
194 print_arguments(args)
195
196 infer_from_ckpt(args)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fluid/DeepASR/infer_by_ckpt.py b/fluid/DeepASR/infer_by_ckpt.py
--- a/fluid/DeepASR/infer_by_ckpt.py
+++ b/fluid/DeepASR/infer_by_ckpt.py
@@ -148,6 +148,10 @@
# load checkpoint.
fluid.io.load_persistables(exe, args.checkpoint)
+ # init decoder
+ decoder = Decoder(args.vocabulary, args.graphs, args.log_prior,
+ args.acoustic_scale)
+
ltrans = [
trans_add_delta.TransAddDelta(2, 2),
trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
@@ -184,7 +188,7 @@
infer_batch = split_infer_result(probs, lod)
for index, sample in enumerate(infer_batch):
key = "utter#%d" % (batch_id * args.batch_size + index)
- print(key, ": ", decoder.decode(key, sample), "\n")
+ print(key, ": ", decoder.decode(key, sample).encode("utf8"), "\n")
print(np.mean(infer_costs), np.mean(infer_accs))
| {"golden_diff": "diff --git a/fluid/DeepASR/infer_by_ckpt.py b/fluid/DeepASR/infer_by_ckpt.py\n--- a/fluid/DeepASR/infer_by_ckpt.py\n+++ b/fluid/DeepASR/infer_by_ckpt.py\n@@ -148,6 +148,10 @@\n # load checkpoint.\n fluid.io.load_persistables(exe, args.checkpoint)\n \n+ # init decoder\n+ decoder = Decoder(args.vocabulary, args.graphs, args.log_prior,\n+ args.acoustic_scale)\n+\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n@@ -184,7 +188,7 @@\n infer_batch = split_infer_result(probs, lod)\n for index, sample in enumerate(infer_batch):\n key = \"utter#%d\" % (batch_id * args.batch_size + index)\n- print(key, \": \", decoder.decode(key, sample), \"\\n\")\n+ print(key, \": \", decoder.decode(key, sample).encode(\"utf8\"), \"\\n\")\n \n print(np.mean(infer_costs), np.mean(infer_accs))\n", "issue": "Add decoder init & verify output\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport time\n\nimport paddle.fluid as fluid\nimport data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm\nimport data_utils.augmentor.trans_add_delta as trans_add_delta\nimport data_utils.augmentor.trans_splice as trans_splice\nimport data_utils.async_data_reader as reader\nfrom decoder.post_decode_faster import Decoder\nfrom data_utils.util import lodtensor_to_ndarray\nfrom model_utils.model import stacked_lstmp_model\nfrom data_utils.util import split_infer_result\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Run inference by using checkpoint.\")\n parser.add_argument(\n '--batch_size',\n type=int,\n default=32,\n help='The sequence number of a batch data. (default: %(default)d)')\n parser.add_argument(\n '--minimum_batch_size',\n type=int,\n default=1,\n help='The minimum sequence number of a batch data. '\n '(default: %(default)d)')\n parser.add_argument(\n '--frame_dim',\n type=int,\n default=120 * 11,\n help='Frame dimension of feature data. (default: %(default)d)')\n parser.add_argument(\n '--stacked_num',\n type=int,\n default=5,\n help='Number of lstmp layers to stack. (default: %(default)d)')\n parser.add_argument(\n '--proj_dim',\n type=int,\n default=512,\n help='Project size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--hidden_dim',\n type=int,\n default=1024,\n help='Hidden size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--class_num',\n type=int,\n default=1749,\n help='Number of classes in label. (default: %(default)d)')\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.00016,\n help='Learning rate used to train. (default: %(default)f)')\n parser.add_argument(\n '--device',\n type=str,\n default='GPU',\n choices=['CPU', 'GPU'],\n help='The device type. (default: %(default)s)')\n parser.add_argument(\n '--parallel', action='store_true', help='If set, run in parallel.')\n parser.add_argument(\n '--mean_var',\n type=str,\n default='data/global_mean_var_search26kHr',\n help=\"The path for feature's global mean and variance. \"\n \"(default: %(default)s)\")\n parser.add_argument(\n '--infer_feature_lst',\n type=str,\n default='data/infer_feature.lst',\n help='The feature list path for inference. (default: %(default)s)')\n parser.add_argument(\n '--infer_label_lst',\n type=str,\n default='data/infer_label.lst',\n help='The label list path for inference. 
(default: %(default)s)')\n parser.add_argument(\n '--checkpoint',\n type=str,\n default='./checkpoint',\n help=\"The checkpoint path to init model. (default: %(default)s)\")\n parser.add_argument(\n '--vocabulary',\n type=str,\n default='./decoder/graph/words.txt',\n help=\"The path to vocabulary. (default: %(default)s)\")\n parser.add_argument(\n '--graphs',\n type=str,\n default='./decoder/graph/TLG.fst',\n help=\"The path to TLG graphs for decoding. (default: %(default)s)\")\n parser.add_argument(\n '--log_prior',\n type=str,\n default=\"./decoder/logprior\",\n help=\"The log prior probs for training data. (default: %(default)s)\")\n parser.add_argument(\n '--acoustic_scale',\n type=float,\n default=0.2,\n help=\"Scaling factor for acoustic likelihoods. (default: %(default)f)\")\n args = parser.parse_args()\n return args\n\n\ndef print_arguments(args):\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).iteritems()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\ndef infer_from_ckpt(args):\n \"\"\"Inference by using checkpoint.\"\"\"\n\n if not os.path.exists(args.checkpoint):\n raise IOError(\"Invalid checkpoint!\")\n\n prediction, avg_cost, accuracy = stacked_lstmp_model(\n frame_dim=args.frame_dim,\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=args.class_num,\n parallel=args.parallel)\n\n infer_program = fluid.default_main_program().clone()\n\n optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)\n optimizer.minimize(avg_cost)\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n # load checkpoint.\n fluid.io.load_persistables(exe, args.checkpoint)\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n trans_splice.TransSplice()\n ]\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n # infer data reader\n infer_data_reader = reader.AsyncDataReader(args.infer_feature_lst,\n args.infer_label_lst)\n infer_data_reader.set_transformers(ltrans)\n infer_costs, infer_accs = [], []\n for batch_id, batch_data in enumerate(\n infer_data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n # load_data\n (features, labels, lod) = batch_data\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n results = exe.run(infer_program,\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[prediction, avg_cost, accuracy],\n return_numpy=False)\n infer_costs.append(lodtensor_to_ndarray(results[1])[0])\n infer_accs.append(lodtensor_to_ndarray(results[2])[0])\n\n probs, lod = lodtensor_to_ndarray(results[0])\n infer_batch = split_infer_result(probs, lod)\n for index, sample in enumerate(infer_batch):\n key = \"utter#%d\" % (batch_id * args.batch_size + index)\n print(key, \": \", decoder.decode(key, sample), \"\\n\")\n\n print(np.mean(infer_costs), np.mean(infer_accs))\n\n\nif __name__ == '__main__':\n args = parse_args()\n print_arguments(args)\n\n infer_from_ckpt(args)\n", "path": "fluid/DeepASR/infer_by_ckpt.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport numpy as np\nimport argparse\nimport time\n\nimport paddle.fluid as fluid\nimport 
data_utils.augmentor.trans_mean_variance_norm as trans_mean_variance_norm\nimport data_utils.augmentor.trans_add_delta as trans_add_delta\nimport data_utils.augmentor.trans_splice as trans_splice\nimport data_utils.async_data_reader as reader\nfrom decoder.post_decode_faster import Decoder\nfrom data_utils.util import lodtensor_to_ndarray\nfrom model_utils.model import stacked_lstmp_model\nfrom data_utils.util import split_infer_result\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Run inference by using checkpoint.\")\n parser.add_argument(\n '--batch_size',\n type=int,\n default=32,\n help='The sequence number of a batch data. (default: %(default)d)')\n parser.add_argument(\n '--minimum_batch_size',\n type=int,\n default=1,\n help='The minimum sequence number of a batch data. '\n '(default: %(default)d)')\n parser.add_argument(\n '--frame_dim',\n type=int,\n default=120 * 11,\n help='Frame dimension of feature data. (default: %(default)d)')\n parser.add_argument(\n '--stacked_num',\n type=int,\n default=5,\n help='Number of lstmp layers to stack. (default: %(default)d)')\n parser.add_argument(\n '--proj_dim',\n type=int,\n default=512,\n help='Project size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--hidden_dim',\n type=int,\n default=1024,\n help='Hidden size of lstmp unit. (default: %(default)d)')\n parser.add_argument(\n '--class_num',\n type=int,\n default=1749,\n help='Number of classes in label. (default: %(default)d)')\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=0.00016,\n help='Learning rate used to train. (default: %(default)f)')\n parser.add_argument(\n '--device',\n type=str,\n default='GPU',\n choices=['CPU', 'GPU'],\n help='The device type. (default: %(default)s)')\n parser.add_argument(\n '--parallel', action='store_true', help='If set, run in parallel.')\n parser.add_argument(\n '--mean_var',\n type=str,\n default='data/global_mean_var_search26kHr',\n help=\"The path for feature's global mean and variance. \"\n \"(default: %(default)s)\")\n parser.add_argument(\n '--infer_feature_lst',\n type=str,\n default='data/infer_feature.lst',\n help='The feature list path for inference. (default: %(default)s)')\n parser.add_argument(\n '--infer_label_lst',\n type=str,\n default='data/infer_label.lst',\n help='The label list path for inference. (default: %(default)s)')\n parser.add_argument(\n '--checkpoint',\n type=str,\n default='./checkpoint',\n help=\"The checkpoint path to init model. (default: %(default)s)\")\n parser.add_argument(\n '--vocabulary',\n type=str,\n default='./decoder/graph/words.txt',\n help=\"The path to vocabulary. (default: %(default)s)\")\n parser.add_argument(\n '--graphs',\n type=str,\n default='./decoder/graph/TLG.fst',\n help=\"The path to TLG graphs for decoding. (default: %(default)s)\")\n parser.add_argument(\n '--log_prior',\n type=str,\n default=\"./decoder/logprior\",\n help=\"The log prior probs for training data. (default: %(default)s)\")\n parser.add_argument(\n '--acoustic_scale',\n type=float,\n default=0.2,\n help=\"Scaling factor for acoustic likelihoods. 
(default: %(default)f)\")\n args = parser.parse_args()\n return args\n\n\ndef print_arguments(args):\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).iteritems()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\ndef infer_from_ckpt(args):\n \"\"\"Inference by using checkpoint.\"\"\"\n\n if not os.path.exists(args.checkpoint):\n raise IOError(\"Invalid checkpoint!\")\n\n prediction, avg_cost, accuracy = stacked_lstmp_model(\n frame_dim=args.frame_dim,\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=args.class_num,\n parallel=args.parallel)\n\n infer_program = fluid.default_main_program().clone()\n\n optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)\n optimizer.minimize(avg_cost)\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n # load checkpoint.\n fluid.io.load_persistables(exe, args.checkpoint)\n\n # init decoder\n decoder = Decoder(args.vocabulary, args.graphs, args.log_prior,\n args.acoustic_scale)\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n trans_splice.TransSplice()\n ]\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n # infer data reader\n infer_data_reader = reader.AsyncDataReader(args.infer_feature_lst,\n args.infer_label_lst)\n infer_data_reader.set_transformers(ltrans)\n infer_costs, infer_accs = [], []\n for batch_id, batch_data in enumerate(\n infer_data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n # load_data\n (features, labels, lod) = batch_data\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n results = exe.run(infer_program,\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[prediction, avg_cost, accuracy],\n return_numpy=False)\n infer_costs.append(lodtensor_to_ndarray(results[1])[0])\n infer_accs.append(lodtensor_to_ndarray(results[2])[0])\n\n probs, lod = lodtensor_to_ndarray(results[0])\n infer_batch = split_infer_result(probs, lod)\n for index, sample in enumerate(infer_batch):\n key = \"utter#%d\" % (batch_id * args.batch_size + index)\n print(key, \": \", decoder.decode(key, sample).encode(\"utf8\"), \"\\n\")\n\n print(np.mean(infer_costs), np.mean(infer_accs))\n\n\nif __name__ == '__main__':\n args = parse_args()\n print_arguments(args)\n\n infer_from_ckpt(args)\n", "path": "fluid/DeepASR/infer_by_ckpt.py"}]} | 2,240 | 269 |
gh_patches_debug_16815 | rasdani/github-patches | git_diff | pypa__cibuildwheel-701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing the default branch to `main`
This is just a heads up, I'm planning to change the default branch on this repo to `main` this week, let's say Wednesday 26th. Github have a tool to change it over, and update PRs to target the new branch, but you might have to update it on local checkouts and forks. Shouldn't be a big issue though, this is what [Github say](https://github.com/github/renaming#renaming-existing-branches) about it:
> Renaming a branch will:
>
> - Re-target any open pull requests
> - Update any draft releases based on the branch
> - Move any branch protection rules that explicitly reference the old name
> - Update the branch used to build GitHub Pages, if applicable
> - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository
> - Show a notice to contributors who git push to the old branch
> - Redirect web requests for the old branch name to the new branch name
> - Return a "Moved Permanently" response in API requests for the old branch name
---
Checklist for the switch:
- [x] Use the Github tool to change it over
- [x] Find/replace `master` to `main` in CI configs, docs, scripts, example code, etc
- [x] Change default branch on Readthedocs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/make_dependency_update_pr.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from __future__ import annotations
4
5 import os
6 import sys
7 import textwrap
8 import time
9 from pathlib import Path
10 from subprocess import run
11
12 import click
13
14
15 def shell(cmd, **kwargs):
16 return run([cmd], shell=True, **kwargs)
17
18
19 def git_repo_has_changes():
20 unstaged_changes = shell("git diff-index --quiet HEAD --").returncode != 0
21 staged_changes = shell("git diff-index --quiet --cached HEAD --").returncode != 0
22 return unstaged_changes or staged_changes
23
24
25 @click.command()
26 def main():
27 project_root = Path(__file__).parent / ".."
28 os.chdir(project_root)
29
30 if git_repo_has_changes():
31 print("Your git repo has uncommitted changes. Commit or stash before continuing.")
32 sys.exit(1)
33
34 previous_branch = shell(
35 "git rev-parse --abbrev-ref HEAD", check=True, capture_output=True, encoding="utf8"
36 ).stdout.strip()
37
38 shell("git fetch origin", check=True)
39
40 timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.gmtime())
41 branch_name = f"update-constraints-{timestamp}"
42
43 shell(f"git checkout -b {branch_name} origin/master", check=True)
44
45 try:
46 shell("bin/update_dependencies.py", check=True)
47
48 if not git_repo_has_changes():
49 print("Done: no constraint updates required.")
50 return
51
52 shell('git commit -a -m "Update dependencies"', check=True)
53 body = textwrap.dedent(
54 f"""
55 Update the versions of our dependencies.
56
57 PR generated by `{os.path.basename(__file__)}`.
58 """
59 )
60 run(
61 [
62 "gh",
63 "pr",
64 "create",
65 "--repo=pypa/cibuildwheel",
66 "--base=master",
67 "--title=Update dependencies",
68 f"--body='{body}'",
69 ],
70 check=True,
71 )
72
73 print("Done.")
74 finally:
75 # remove any local changes
76 shell("git checkout -- .")
77 shell(f"git checkout {previous_branch}", check=True)
78 shell(f"git branch -D --force {branch_name}", check=True)
79
80
81 if __name__ == "__main__":
82 main.main(standalone_mode=True)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/make_dependency_update_pr.py b/bin/make_dependency_update_pr.py
--- a/bin/make_dependency_update_pr.py
+++ b/bin/make_dependency_update_pr.py
@@ -40,7 +40,7 @@
timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.gmtime())
branch_name = f"update-constraints-{timestamp}"
- shell(f"git checkout -b {branch_name} origin/master", check=True)
+ shell(f"git checkout -b {branch_name} origin/main", check=True)
try:
shell("bin/update_dependencies.py", check=True)
@@ -63,7 +63,7 @@
"pr",
"create",
"--repo=pypa/cibuildwheel",
- "--base=master",
+ "--base=main",
"--title=Update dependencies",
f"--body='{body}'",
],
| {"golden_diff": "diff --git a/bin/make_dependency_update_pr.py b/bin/make_dependency_update_pr.py\n--- a/bin/make_dependency_update_pr.py\n+++ b/bin/make_dependency_update_pr.py\n@@ -40,7 +40,7 @@\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n \n- shell(f\"git checkout -b {branch_name} origin/master\", check=True)\n+ shell(f\"git checkout -b {branch_name} origin/main\", check=True)\n \n try:\n shell(\"bin/update_dependencies.py\", check=True)\n@@ -63,7 +63,7 @@\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n- \"--base=master\",\n+ \"--base=main\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n", "issue": "Changing the default branch to `main`\nThis is just a heads up, I'm planning to change the default branch on this repo to `main` this week, let's say Wednesday 26th. Github have a tool to change it over, and update PRs to target the new branch, but you might have to update it on local checkouts and forks. Shouldn't be a big issue though, this is what [Github say](https://github.com/github/renaming#renaming-existing-branches) about it:\r\n\r\n> Renaming a branch will:\r\n> \r\n> - Re-target any open pull requests\r\n> - Update any draft releases based on the branch\r\n> - Move any branch protection rules that explicitly reference the old name\r\n> - Update the branch used to build GitHub Pages, if applicable\r\n> - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository\r\n> - Show a notice to contributors who git push to the old branch\r\n> - Redirect web requests for the old branch name to the new branch name\r\n> - Return a \"Moved Permanently\" response in API requests for the old branch name\r\n\r\n---\r\n\r\nChecklist for the switch:\r\n\r\n- [x] Use the Github tool to change it over\r\n- [x] Find/replace `master` to `main` in CI configs, docs, scripts, example code, etc\r\n- [x] Change default branch on Readthedocs\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport textwrap\nimport time\nfrom pathlib import Path\nfrom subprocess import run\n\nimport click\n\n\ndef shell(cmd, **kwargs):\n return run([cmd], shell=True, **kwargs)\n\n\ndef git_repo_has_changes():\n unstaged_changes = shell(\"git diff-index --quiet HEAD --\").returncode != 0\n staged_changes = shell(\"git diff-index --quiet --cached HEAD --\").returncode != 0\n return unstaged_changes or staged_changes\n\n\[email protected]()\ndef main():\n project_root = Path(__file__).parent / \"..\"\n os.chdir(project_root)\n\n if git_repo_has_changes():\n print(\"Your git repo has uncommitted changes. 
Commit or stash before continuing.\")\n sys.exit(1)\n\n previous_branch = shell(\n \"git rev-parse --abbrev-ref HEAD\", check=True, capture_output=True, encoding=\"utf8\"\n ).stdout.strip()\n\n shell(\"git fetch origin\", check=True)\n\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n\n shell(f\"git checkout -b {branch_name} origin/master\", check=True)\n\n try:\n shell(\"bin/update_dependencies.py\", check=True)\n\n if not git_repo_has_changes():\n print(\"Done: no constraint updates required.\")\n return\n\n shell('git commit -a -m \"Update dependencies\"', check=True)\n body = textwrap.dedent(\n f\"\"\"\n Update the versions of our dependencies.\n\n PR generated by `{os.path.basename(__file__)}`.\n \"\"\"\n )\n run(\n [\n \"gh\",\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n \"--base=master\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n check=True,\n )\n\n print(\"Done.\")\n finally:\n # remove any local changes\n shell(\"git checkout -- .\")\n shell(f\"git checkout {previous_branch}\", check=True)\n shell(f\"git branch -D --force {branch_name}\", check=True)\n\n\nif __name__ == \"__main__\":\n main.main(standalone_mode=True)\n", "path": "bin/make_dependency_update_pr.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport textwrap\nimport time\nfrom pathlib import Path\nfrom subprocess import run\n\nimport click\n\n\ndef shell(cmd, **kwargs):\n return run([cmd], shell=True, **kwargs)\n\n\ndef git_repo_has_changes():\n unstaged_changes = shell(\"git diff-index --quiet HEAD --\").returncode != 0\n staged_changes = shell(\"git diff-index --quiet --cached HEAD --\").returncode != 0\n return unstaged_changes or staged_changes\n\n\[email protected]()\ndef main():\n project_root = Path(__file__).parent / \"..\"\n os.chdir(project_root)\n\n if git_repo_has_changes():\n print(\"Your git repo has uncommitted changes. Commit or stash before continuing.\")\n sys.exit(1)\n\n previous_branch = shell(\n \"git rev-parse --abbrev-ref HEAD\", check=True, capture_output=True, encoding=\"utf8\"\n ).stdout.strip()\n\n shell(\"git fetch origin\", check=True)\n\n timestamp = time.strftime(\"%Y-%m-%dT%H-%M-%S\", time.gmtime())\n branch_name = f\"update-constraints-{timestamp}\"\n\n shell(f\"git checkout -b {branch_name} origin/main\", check=True)\n\n try:\n shell(\"bin/update_dependencies.py\", check=True)\n\n if not git_repo_has_changes():\n print(\"Done: no constraint updates required.\")\n return\n\n shell('git commit -a -m \"Update dependencies\"', check=True)\n body = textwrap.dedent(\n f\"\"\"\n Update the versions of our dependencies.\n\n PR generated by `{os.path.basename(__file__)}`.\n \"\"\"\n )\n run(\n [\n \"gh\",\n \"pr\",\n \"create\",\n \"--repo=pypa/cibuildwheel\",\n \"--base=main\",\n \"--title=Update dependencies\",\n f\"--body='{body}'\",\n ],\n check=True,\n )\n\n print(\"Done.\")\n finally:\n # remove any local changes\n shell(\"git checkout -- .\")\n shell(f\"git checkout {previous_branch}\", check=True)\n shell(f\"git branch -D --force {branch_name}\", check=True)\n\n\nif __name__ == \"__main__\":\n main.main(standalone_mode=True)\n", "path": "bin/make_dependency_update_pr.py"}]} | 1,227 | 201 |
gh_patches_debug_14764 | rasdani/github-patches | git_diff | kserve__kserve-882 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing requirements.txt in the Pypi source code
**What steps did you take and what happened:**
The requirements.txt file is missing in the source code on Pypi so setuptools will not work.
```
Executing setuptoolsBuildPhase
Traceback (most recent call last):
File "nix_run_setup", line 8, in <module>
exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
File "setup.py", line 23, in <module>
with open('requirements.txt') as f:
FileNotFoundError: [Errno 2] No such file or directory: 'requirements.txt'
builder for '/nix/store/z8sh0v4cji9aq9v02865273xvmhcwzgh-python3.8-kfserving-0.3.0.1.drv' failed with exit code 1
cannot build derivation '/nix/store/75ihn4avq52qdpavs0s8c1y0nj0wjfdx-python3-3.8.2-env.drv': 1 dependencies couldn't be built
```
**What did you expect to happen:**
requirements.txt in the tar.gz archive
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version: 0.3.0.1
- Kubeflow version:
- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`): NixOS 20.03 (Markhor) x86_64
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/setup.py`
Content:
```
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 with open('requirements.txt') as f:
24 REQUIRES = f.readlines()
25
26 setuptools.setup(
27 name='kfserving',
28 version='0.3.0.1',
29 author="Kubeflow Authors",
30 author_email='[email protected], [email protected]',
31 license="Apache License Version 2.0",
32 url="https://github.com/kubeflow/kfserving/python/kfserving",
33 description="KFServing Python SDK",
34 long_description="Python SDK for KFServing Server and Client.",
35 python_requires='>=3.6',
36 packages=[
37 'kfserving',
38 'kfserving.api',
39 'kfserving.constants',
40 'kfserving.models',
41 'kfserving.handlers',
42 'kfserving.utils',
43 ],
44 package_data={},
45 include_package_data=False,
46 zip_safe=False,
47 classifiers=[
48 'Intended Audience :: Developers',
49 'Intended Audience :: Education',
50 'Intended Audience :: Science/Research',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3.6',
53 'Programming Language :: Python :: 3.7',
54 "License :: OSI Approved :: Apache Software License",
55 "Operating System :: OS Independent",
56 'Topic :: Scientific/Engineering',
57 'Topic :: Scientific/Engineering :: Artificial Intelligence',
58 'Topic :: Software Development',
59 'Topic :: Software Development :: Libraries',
60 'Topic :: Software Development :: Libraries :: Python Modules',
61 ],
62 install_requires=REQUIRES,
63 tests_require=TESTS_REQUIRES,
64 extras_require={'test': TESTS_REQUIRES}
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py
--- a/python/kfserving/setup.py
+++ b/python/kfserving/setup.py
@@ -25,7 +25,7 @@
setuptools.setup(
name='kfserving',
- version='0.3.0.1',
+ version='0.3.0.2',
author="Kubeflow Authors",
author_email='[email protected], [email protected]',
license="Apache License Version 2.0",
@@ -41,8 +41,8 @@
'kfserving.handlers',
'kfserving.utils',
],
- package_data={},
- include_package_data=False,
+ package_data={'': ['requirements.txt']},
+ include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
| {"golden_diff": "diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py\n--- a/python/kfserving/setup.py\n+++ b/python/kfserving/setup.py\n@@ -25,7 +25,7 @@\n \n setuptools.setup(\n name='kfserving',\n- version='0.3.0.1',\n+ version='0.3.0.2',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n@@ -41,8 +41,8 @@\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n- package_data={},\n- include_package_data=False,\n+ package_data={'': ['requirements.txt']},\n+ include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "Missing requirements.txt in the Pypi source code\n**What steps did you take and what happened:**\r\nThe requirements.txt file is missing in the source code on Pypi so setuptools will not work.\r\n\r\n```\r\nExecuting setuptoolsBuildPhase\r\nTraceback (most recent call last):\r\n File \"nix_run_setup\", line 8, in <module>\r\n exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\r\n File \"setup.py\", line 23, in <module>\r\n with open('requirements.txt') as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'requirements.txt'\r\nbuilder for '/nix/store/z8sh0v4cji9aq9v02865273xvmhcwzgh-python3.8-kfserving-0.3.0.1.drv' failed with exit code 1\r\ncannot build derivation '/nix/store/75ihn4avq52qdpavs0s8c1y0nj0wjfdx-python3-3.8.2-env.drv': 1 dependencies couldn't be built\r\n```\r\n\r\n**What did you expect to happen:**\r\nrequirements.txt in the tar.gz archive\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version: 0.3.0.1\r\n- Kubeflow version:\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. 
from `/etc/os-release`): NixOS 20.03 (Markhor) x86_64\r\n\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kfserving',\n version='0.3.0.1',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kubeflow/kfserving/python/kfserving\",\n description=\"KFServing Python SDK\",\n long_description=\"Python SDK for KFServing Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kfserving',\n 'kfserving.api',\n 'kfserving.constants',\n 'kfserving.models',\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n package_data={},\n include_package_data=False,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kfserving/setup.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kfserving',\n version='0.3.0.2',\n author=\"Kubeflow Authors\",\n author_email='[email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kubeflow/kfserving/python/kfserving\",\n description=\"KFServing Python SDK\",\n long_description=\"Python SDK for KFServing Server and Client.\",\n python_requires='>=3.6',\n packages=[\n 'kfserving',\n 'kfserving.api',\n 'kfserving.constants',\n 'kfserving.models',\n 'kfserving.handlers',\n 'kfserving.utils',\n ],\n package_data={'': ['requirements.txt']},\n 
include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kfserving/setup.py"}]} | 1,288 | 197 |
gh_patches_debug_30307 | rasdani/github-patches | git_diff | mne-tools__mne-bids-207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Binder links in examples
This uses a new-ish feature of sphinx-gallery to auto-generate binder links for the examples.
We can't merge this until my PR in sphinx gallery lands (https://github.com/sphinx-gallery/sphinx-gallery/pull/439)
In the meantime, here's an example of how it'll look live:
http://predictablynoisy.com/mne-bids/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/convert_group_studies.py`
Content:
```
1 """
2 =================================
3 BIDS conversion for group studies
4 =================================
5
6 Here, we show how to do BIDS conversion for group studies.
7 The data is available here: https://openneuro.org/datasets/ds000117
8
9 References
10 ----------
11
12 [1] Wakeman, Daniel G., and Richard N. Henson.
13 "A multi-subject, multi-modal human neuroimaging dataset."
14 Scientific data, 2 (2015): 150001.
15
16 """
17
18 # Authors: Mainak Jas <[email protected]>
19 # Teon Brooks <[email protected]>
20
21 # License: BSD (3-clause)
22
23 ###############################################################################
24 # Let us import ``mne_bids``
25
26 import os.path as op
27
28 import mne
29 from mne_bids import write_raw_bids, make_bids_basename
30 from mne_bids.datasets import fetch_faces_data
31 from mne_bids.utils import print_dir_tree
32
33 ###############################################################################
34 # And fetch the data.
35
36 subject_ids = [1]
37 runs = range(1, 7)
38
39 data_path = op.join(op.expanduser('~'), 'mne_data')
40 repo = 'ds000117'
41 fetch_faces_data(data_path, repo, subject_ids)
42
43 output_path = op.join(data_path, 'ds000117-bids')
44
45 ###############################################################################
46 #
47 # .. warning:: This will download 7.9 GB of data for one subject!
48 # Define event_ids.
49
50 event_id = {
51 'face/famous/first': 5,
52 'face/famous/immediate': 6,
53 'face/famous/long': 7,
54 'face/unfamiliar/first': 13,
55 'face/unfamiliar/immediate': 14,
56 'face/unfamiliar/long': 15,
57 'scrambled/first': 17,
58 'scrambled/immediate': 18,
59 'scrambled/long': 19,
60 }
61
62 ###############################################################################
63 # Let us loop over the subjects and create BIDS-compatible folder
64
65 for subject_id in subject_ids:
66 subject = 'sub%03d' % subject_id
67 for run in runs:
68 raw_fname = op.join(data_path, repo, subject, 'MEG',
69 'run_%02d_raw.fif' % run)
70
71 raw = mne.io.read_raw_fif(raw_fname)
72 bids_basename = make_bids_basename(subject=str(subject_id),
73 session='01', task='VisualFaces',
74 run=str(run))
75 write_raw_bids(raw, bids_basename, output_path, event_id=event_id,
76 overwrite=True)
77
78 ###############################################################################
79 # Now let's see the structure of the BIDS folder we created.
80 print_dir_tree(output_path)
81
```
Path: `doc/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # mne_bids documentation build configuration file, created by
4 # sphinx-quickstart on Wed Sep 6 04:42:26 2017.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 # import os
20 # import sys
21 # sys.path.insert(0, os.path.abspath('.'))
22
23 from datetime import date
24 import sphinx_gallery # noqa
25 import sphinx_bootstrap_theme
26
27 import mne_bids
28
29 # -- General configuration ------------------------------------------------
30
31 # If your documentation needs a minimal Sphinx version, state it here.
32 #
33 # needs_sphinx = '1.0'
34
35 # Add any Sphinx extension module names here, as strings. They can be
36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
37 # ones.
38 extensions = [
39 'sphinx.ext.autodoc',
40 'sphinx.ext.mathjax',
41 'sphinx.ext.viewcode',
42 'numpydoc',
43 'sphinx.ext.autosummary',
44 'sphinx.ext.doctest',
45 'sphinx_gallery.gen_gallery'
46 ]
47
48 # generate autosummary even if no references
49 autosummary_generate = True
50
51 # Add any paths that contain templates here, relative to this directory.
52 templates_path = ['_templates']
53
54 # The suffix(es) of source filenames.
55 # You can specify multiple suffix as a list of string:
56 #
57 # source_suffix = ['.rst', '.md']
58 source_suffix = '.rst'
59
60 # The master toctree document.
61 master_doc = 'index'
62
63 # General information about the project.
64 project = u'mne_bids'
65 td = date.today()
66 copyright = u'%s, MNE Developers. Last updated on %s' % (td.year,
67 td.isoformat())
68
69 author = u'MNE Developers'
70
71 # The version info for the project you're documenting, acts as replacement for
72 # |version| and |release|, also used in various other places throughout the
73 # built documents.
74 #
75 # The short X.Y version.
76 version = mne_bids.__version__
77 # The full version, including alpha/beta/rc tags.
78 release = version
79
80 # The language for content autogenerated by Sphinx. Refer to documentation
81 # for a list of supported languages.
82 #
83 # This is also used if you do content translation via gettext catalogs.
84 # Usually you set "language" from the command line for these cases.
85 language = None
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This patterns also effect to html_static_path and html_extra_path
90 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
91
92 # The name of the Pygments (syntax highlighting) style to use.
93 pygments_style = 'sphinx'
94
95 # If true, `todo` and `todoList` produce output, else they produce nothing.
96 todo_include_todos = False
97
98
99 # -- Options for HTML output ----------------------------------------------
100
101 # The theme to use for HTML and HTML Help pages. See the documentation for
102 # a list of builtin themes.
103 #
104 html_theme = 'bootstrap'
105 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
106
107 # Theme options are theme-specific and customize the look and feel of a theme
108 # further. For a list of options available for each theme, see the
109 # documentation.
110 #
111 html_theme_options = {
112 'navbar_title': 'MNE-BIDS',
113 'bootswatch_theme': "flatly",
114 'navbar_sidebarrel': False,
115 'bootstrap_version': "3",
116 'navbar_links': [
117 ("Gallery", "auto_examples/index"),
118 ("API", "api"),
119 ("What's new", "whats_new"),
120 ("Github", "https://github.com/mne-tools/mne-bids", True),
121 ]}
122
123 # Add any paths that contain custom static files (such as style sheets) here,
124 # relative to this directory. They are copied after the builtin static files,
125 # so a file named "default.css" will overwrite the builtin "default.css".
126 html_static_path = ['_static']
127
128
129 # -- Options for HTMLHelp output ------------------------------------------
130
131 # Output file base name for HTML help builder.
132 htmlhelp_basename = 'mne_bidsdoc'
133
134
135 # -- Options for LaTeX output ---------------------------------------------
136
137 latex_elements = {
138 # The paper size ('letterpaper' or 'a4paper').
139 #
140 # 'papersize': 'letterpaper',
141
142 # The font size ('10pt', '11pt' or '12pt').
143 #
144 # 'pointsize': '10pt',
145
146 # Additional stuff for the LaTeX preamble.
147 #
148 # 'preamble': '',
149
150 # Latex figure (float) alignment
151 #
152 # 'figure_align': 'htbp',
153 }
154
155 # Grouping the document tree into LaTeX files. List of tuples
156 # (source start file, target name, title,
157 # author, documentclass [howto, manual, or own class]).
158 latex_documents = [
159 (master_doc, 'mne_bids.tex', u'mne\\_bids Documentation',
160 u'Mainak Jas', 'manual'),
161 ]
162
163
164 # -- Options for manual page output ---------------------------------------
165
166 # One entry per manual page. List of tuples
167 # (source start file, name, description, authors, manual section).
168 man_pages = [
169 (master_doc, 'mne_bids', u'mne_bids Documentation',
170 [author], 1)
171 ]
172
173
174 # -- Options for Texinfo output -------------------------------------------
175
176 # Grouping the document tree into Texinfo files. List of tuples
177 # (source start file, target name, title, author,
178 # dir menu entry, description, category)
179 texinfo_documents = [
180 (master_doc, 'mne_bids', u'mne_bids Documentation',
181 author, 'mne_bids', 'One line description of project.',
182 'Miscellaneous'),
183 ]
184
185
186 sphinx_gallery_conf = {
187 'examples_dirs': '../examples',
188 'gallery_dirs': 'auto_examples',
189 'filename_pattern': '^((?!sgskip).)*$',
190 'backreferences_dir': 'generated',
191 'reference_url': {
192 'mne': 'http://mne-tools.github.io/stable/',
193 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
194 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'
195 }
196 }
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -192,5 +192,15 @@
'mne': 'http://mne-tools.github.io/stable/',
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'
+ },
+ 'binder': {
+ # Required keys
+ 'org': 'mne-tools',
+ 'repo': 'mne-bids',
+ 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
+ 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
+ 'dependencies': [
+ '../environment.yml'
+ ],
}
}
diff --git a/examples/convert_group_studies.py b/examples/convert_group_studies.py
--- a/examples/convert_group_studies.py
+++ b/examples/convert_group_studies.py
@@ -32,6 +32,7 @@
###############################################################################
# And fetch the data.
+# .. warning:: This will download 7.9 GB of data for one subject!
subject_ids = [1]
runs = range(1, 7)
@@ -43,8 +44,6 @@
output_path = op.join(data_path, 'ds000117-bids')
###############################################################################
-#
-# .. warning:: This will download 7.9 GB of data for one subject!
# Define event_ids.
event_id = {
@@ -77,4 +76,5 @@
###############################################################################
# Now let's see the structure of the BIDS folder we created.
+
print_dir_tree(output_path)
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -192,5 +192,15 @@\n 'mne': 'http://mne-tools.github.io/stable/',\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',\n 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'\n+ },\n+ 'binder': {\n+ # Required keys\n+ 'org': 'mne-tools',\n+ 'repo': 'mne-bids',\n+ 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n+ 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n+ 'dependencies': [\n+ '../environment.yml'\n+ ],\n }\n }\ndiff --git a/examples/convert_group_studies.py b/examples/convert_group_studies.py\n--- a/examples/convert_group_studies.py\n+++ b/examples/convert_group_studies.py\n@@ -32,6 +32,7 @@\n \n ###############################################################################\n # And fetch the data.\n+# .. warning:: This will download 7.9 GB of data for one subject!\n \n subject_ids = [1]\n runs = range(1, 7)\n@@ -43,8 +44,6 @@\n output_path = op.join(data_path, 'ds000117-bids')\n \n ###############################################################################\n-#\n-# .. warning:: This will download 7.9 GB of data for one subject!\n # Define event_ids.\n \n event_id = {\n@@ -77,4 +76,5 @@\n \n ###############################################################################\n # Now let's see the structure of the BIDS folder we created.\n+\n print_dir_tree(output_path)\n", "issue": "Binder links in examples\nThis uses a new-ish feature of sphinx-gallery to auto-generate binder links for the examples.\r\n\r\nWe can't merge this until my PR in sphinx gallery lands (https://github.com/sphinx-gallery/sphinx-gallery/pull/439)\r\n\r\nIn the meantime, here's an example of how it'll look live:\r\n\r\nhttp://predictablynoisy.com/mne-bids/\n", "before_files": [{"content": "\"\"\"\n=================================\nBIDS conversion for group studies\n=================================\n\nHere, we show how to do BIDS conversion for group studies.\nThe data is available here: https://openneuro.org/datasets/ds000117\n\nReferences\n----------\n\n[1] Wakeman, Daniel G., and Richard N. Henson.\n\"A multi-subject, multi-modal human neuroimaging dataset.\"\nScientific data, 2 (2015): 150001.\n\n\"\"\"\n\n# Authors: Mainak Jas <[email protected]>\n# Teon Brooks <[email protected]>\n\n# License: BSD (3-clause)\n\n###############################################################################\n# Let us import ``mne_bids``\n\nimport os.path as op\n\nimport mne\nfrom mne_bids import write_raw_bids, make_bids_basename\nfrom mne_bids.datasets import fetch_faces_data\nfrom mne_bids.utils import print_dir_tree\n\n###############################################################################\n# And fetch the data.\n\nsubject_ids = [1]\nruns = range(1, 7)\n\ndata_path = op.join(op.expanduser('~'), 'mne_data')\nrepo = 'ds000117'\nfetch_faces_data(data_path, repo, subject_ids)\n\noutput_path = op.join(data_path, 'ds000117-bids')\n\n###############################################################################\n#\n# .. 
warning:: This will download 7.9 GB of data for one subject!\n# Define event_ids.\n\nevent_id = {\n 'face/famous/first': 5,\n 'face/famous/immediate': 6,\n 'face/famous/long': 7,\n 'face/unfamiliar/first': 13,\n 'face/unfamiliar/immediate': 14,\n 'face/unfamiliar/long': 15,\n 'scrambled/first': 17,\n 'scrambled/immediate': 18,\n 'scrambled/long': 19,\n}\n\n###############################################################################\n# Let us loop over the subjects and create BIDS-compatible folder\n\nfor subject_id in subject_ids:\n subject = 'sub%03d' % subject_id\n for run in runs:\n raw_fname = op.join(data_path, repo, subject, 'MEG',\n 'run_%02d_raw.fif' % run)\n\n raw = mne.io.read_raw_fif(raw_fname)\n bids_basename = make_bids_basename(subject=str(subject_id),\n session='01', task='VisualFaces',\n run=str(run))\n write_raw_bids(raw, bids_basename, output_path, event_id=event_id,\n overwrite=True)\n\n###############################################################################\n# Now let's see the structure of the BIDS folder we created.\nprint_dir_tree(output_path)\n", "path": "examples/convert_group_studies.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# mne_bids documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 6 04:42:26 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\nimport sphinx_gallery # noqa\nimport sphinx_bootstrap_theme\n\nimport mne_bids\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'numpydoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx_gallery.gen_gallery'\n]\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'mne_bids'\ntd = date.today()\ncopyright = u'%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'bootstrap'\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'navbar_title': 'MNE-BIDS',\n 'bootswatch_theme': \"flatly\",\n 'navbar_sidebarrel': False,\n 'bootstrap_version': \"3\",\n 'navbar_links': [\n (\"Gallery\", \"auto_examples/index\"),\n (\"API\", \"api\"),\n (\"What's new\", \"whats_new\"),\n (\"Github\", \"https://github.com/mne-tools/mne-bids\", True),\n ]}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'mne_bidsdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'mne_bids.tex', u'mne\\\\_bids Documentation',\n u'Mainak Jas', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n author, 'mne_bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\nsphinx_gallery_conf = {\n 'examples_dirs': '../examples',\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'backreferences_dir': 'generated',\n 'reference_url': {\n 'mne': 'http://mne-tools.github.io/stable/',\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',\n 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'\n }\n}\n", "path": "doc/conf.py"}], "after_files": [{"content": "\"\"\"\n=================================\nBIDS conversion for group studies\n=================================\n\nHere, we show how to do BIDS conversion for group studies.\nThe data is available here: https://openneuro.org/datasets/ds000117\n\nReferences\n----------\n\n[1] Wakeman, Daniel G., and Richard N. Henson.\n\"A multi-subject, multi-modal human neuroimaging dataset.\"\nScientific data, 2 (2015): 150001.\n\n\"\"\"\n\n# Authors: Mainak Jas <[email protected]>\n# Teon Brooks <[email protected]>\n\n# License: BSD (3-clause)\n\n###############################################################################\n# Let us import ``mne_bids``\n\nimport os.path as op\n\nimport mne\nfrom mne_bids import write_raw_bids, make_bids_basename\nfrom mne_bids.datasets import fetch_faces_data\nfrom mne_bids.utils import print_dir_tree\n\n###############################################################################\n# And fetch the data.\n# .. warning:: This will download 7.9 GB of data for one subject!\n\nsubject_ids = [1]\nruns = range(1, 7)\n\ndata_path = op.join(op.expanduser('~'), 'mne_data')\nrepo = 'ds000117'\nfetch_faces_data(data_path, repo, subject_ids)\n\noutput_path = op.join(data_path, 'ds000117-bids')\n\n###############################################################################\n# Define event_ids.\n\nevent_id = {\n 'face/famous/first': 5,\n 'face/famous/immediate': 6,\n 'face/famous/long': 7,\n 'face/unfamiliar/first': 13,\n 'face/unfamiliar/immediate': 14,\n 'face/unfamiliar/long': 15,\n 'scrambled/first': 17,\n 'scrambled/immediate': 18,\n 'scrambled/long': 19,\n}\n\n###############################################################################\n# Let us loop over the subjects and create BIDS-compatible folder\n\nfor subject_id in subject_ids:\n subject = 'sub%03d' % subject_id\n for run in runs:\n raw_fname = op.join(data_path, repo, subject, 'MEG',\n 'run_%02d_raw.fif' % run)\n\n raw = mne.io.read_raw_fif(raw_fname)\n bids_basename = make_bids_basename(subject=str(subject_id),\n session='01', task='VisualFaces',\n run=str(run))\n write_raw_bids(raw, bids_basename, output_path, event_id=event_id,\n overwrite=True)\n\n###############################################################################\n# Now let's see the structure of the BIDS folder we created.\n\nprint_dir_tree(output_path)\n", "path": "examples/convert_group_studies.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# mne_bids documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 6 04:42:26 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or 
modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\nimport sphinx_gallery # noqa\nimport sphinx_bootstrap_theme\n\nimport mne_bids\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'numpydoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx_gallery.gen_gallery'\n]\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'mne_bids'\ntd = date.today()\ncopyright = u'%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'bootstrap'\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'navbar_title': 'MNE-BIDS',\n 'bootswatch_theme': \"flatly\",\n 'navbar_sidebarrel': False,\n 'bootstrap_version': \"3\",\n 'navbar_links': [\n (\"Gallery\", \"auto_examples/index\"),\n (\"API\", \"api\"),\n (\"What's new\", \"whats_new\"),\n (\"Github\", \"https://github.com/mne-tools/mne-bids\", True),\n ]}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'mne_bidsdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'mne_bids.tex', u'mne\\\\_bids Documentation',\n u'Mainak Jas', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n author, 'mne_bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\nsphinx_gallery_conf = {\n 'examples_dirs': '../examples',\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'backreferences_dir': 'generated',\n 'reference_url': {\n 'mne': 'http://mne-tools.github.io/stable/',\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',\n 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'\n },\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'dependencies': [\n '../environment.yml'\n ],\n }\n}\n", "path": "doc/conf.py"}]} | 3,083 | 435 |
gh_patches_debug_11393 | rasdani/github-patches | git_diff | ManimCommunity__manim-189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
With no scene specified on CLI, manim gives false choice.
I was running the command:
`manim -pl --media_dir ~/renderings ~/dev/manim/example_scenes/basic.py `
and was greeted by this:
```
[19:29:42] INFO Read configuration files: config.py:416
['/home/cobordism/.local/lib/python3.8/site-packages/manim/default.cfg']
1: GraphScene
2: LinearTransformationScene
3: MovingCameraScene
4: OpeningManimExample
5: ReconfigurableScene
6: SampleSpaceScene
7: SpecialThreeDScene
8: SquareToCircle
9: ThreeDScene
10: UpdatersExample
11: VectorScene
12: WarpSquare
13: WriteStuff
14: ZoomedScene
Choose number corresponding to desired scene/arguments.
(Use comma separated list for multiple entries)
Choice(s):
```
Clearly something is wonky here The choices should have been only the scenes from basic.py.
i.e. OpeningManimExample, SquareToCircle, WarpSquare, WriteStuff, UpdatersExample.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/__main__.py`
Content:
```
1 import inspect
2 import os
3 import platform
4 import subprocess as sp
5 import sys
6 import re
7 import traceback
8 import importlib.util
9
10 from .config import file_writer_config
11 from .scene.scene import Scene
12 from .utils.sounds import play_error_sound
13 from .utils.sounds import play_finish_sound
14 from . import constants
15 from .logger import logger
16
17
18 def open_file_if_needed(file_writer):
19 if file_writer_config["quiet"]:
20 curr_stdout = sys.stdout
21 sys.stdout = open(os.devnull, "w")
22
23 open_file = any([
24 file_writer_config["preview"],
25 file_writer_config["show_file_in_finder"]
26 ])
27 if open_file:
28 current_os = platform.system()
29 file_paths = []
30
31 if file_writer_config["save_last_frame"]:
32 file_paths.append(file_writer.get_image_file_path())
33 if file_writer_config["write_to_movie"]:
34 file_paths.append(file_writer.get_movie_file_path())
35
36 for file_path in file_paths:
37 if current_os == "Windows":
38 os.startfile(file_path)
39 else:
40 commands = []
41 if current_os == "Linux":
42 commands.append("xdg-open")
43 elif current_os.startswith("CYGWIN"):
44 commands.append("cygstart")
45 else: # Assume macOS
46 commands.append("open")
47
48 if file_writer_config["show_file_in_finder"]:
49 commands.append("-R")
50
51 commands.append(file_path)
52
53 # commands.append("-g")
54 FNULL = open(os.devnull, 'w')
55 sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)
56 FNULL.close()
57
58 if file_writer_config["quiet"]:
59 sys.stdout.close()
60 sys.stdout = curr_stdout
61
62
63 def is_child_scene(obj, module):
64 return (inspect.isclass(obj)
65 and issubclass(obj, Scene)
66 and obj != Scene
67 and obj.__module__.startswith(module.__name__))
68
69
70 def prompt_user_for_choice(scene_classes):
71 num_to_class = {}
72 for count, scene_class in enumerate(scene_classes):
73 count += 1 # start with 1 instead of 0
74 name = scene_class.__name__
75 print("%d: %s" % (count, name))
76 num_to_class[count] = scene_class
77 try:
78 user_input = input(constants.CHOOSE_NUMBER_MESSAGE)
79 return [num_to_class[int(num_str)]
80 for num_str in re.split(r"\s*,\s*", user_input.strip())]
81 except KeyError:
82 logger.error(constants.INVALID_NUMBER_MESSAGE)
83 sys.exit(2)
84 except EOFError:
85 sys.exit(1)
86
87
88 def get_scenes_to_render(scene_classes):
89 if not scene_classes:
90 logger.error(constants.NO_SCENE_MESSAGE)
91 return []
92 if file_writer_config["write_all"]:
93 return scene_classes
94 result = []
95 for scene_name in file_writer_config["scene_names"]:
96 found = False
97 for scene_class in scene_classes:
98 if scene_class.__name__ == scene_name:
99 result.append(scene_class)
100 found = True
101 break
102 if not found and (scene_name != ""):
103 logger.error(
104 constants.SCENE_NOT_FOUND_MESSAGE.format(
105 scene_name
106 )
107 )
108 if result:
109 return result
110 return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)
111
112
113 def get_scene_classes_from_module(module):
114 if hasattr(module, "SCENES_IN_ORDER"):
115 return module.SCENES_IN_ORDER
116 else:
117 return [
118 member[1]
119 for member in inspect.getmembers(
120 module,
121 lambda x: is_child_scene(x, module)
122 )
123 ]
124
125
126 def get_module(file_name):
127 if file_name == "-":
128 module = types.ModuleType("input_scenes")
129 code = sys.stdin.read()
130 try:
131 exec(code, module.__dict__)
132 return module
133 except Exception as e:
134 logger.error(f"Failed to render scene: {str(e)}")
135 sys.exit(2)
136 else:
137 if os.path.exists(file_name):
138 module_name = re.sub(r"\..+$", "", file_name.replace(os.sep, "."))
139 spec = importlib.util.spec_from_file_location(module_name, file_name)
140 module = importlib.util.module_from_spec(spec)
141 spec.loader.exec_module(module)
142 return module
143 else:
144 raise FileNotFoundError(f'{file_name} not found')
145
146
147 def main():
148 module = get_module(file_writer_config["input_file"])
149 all_scene_classes = get_scene_classes_from_module(module)
150 scene_classes_to_render = get_scenes_to_render(all_scene_classes)
151 sound_on = file_writer_config["sound"]
152 for SceneClass in scene_classes_to_render:
153 try:
154 # By invoking, this renders the full scene
155 scene = SceneClass()
156 open_file_if_needed(scene.file_writer)
157 if sound_on:
158 play_finish_sound()
159 except Exception:
160 print("\n\n")
161 traceback.print_exc()
162 print("\n\n")
163 if sound_on:
164 play_error_sound()
165
166
167 if __name__ == "__main__":
168 main()
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/__main__.py b/manim/__main__.py
--- a/manim/__main__.py
+++ b/manim/__main__.py
@@ -135,7 +135,9 @@
sys.exit(2)
else:
if os.path.exists(file_name):
- module_name = re.sub(r"\..+$", "", file_name.replace(os.sep, "."))
+ if file_name[-3:] != ".py":
+ raise Exception(f"{file_name} is not a valid Manim python script.")
+ module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
| {"golden_diff": "diff --git a/manim/__main__.py b/manim/__main__.py\n--- a/manim/__main__.py\n+++ b/manim/__main__.py\n@@ -135,7 +135,9 @@\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n- module_name = re.sub(r\"\\..+$\", \"\", file_name.replace(os.sep, \".\"))\n+ if file_name[-3:] != \".py\":\n+ raise Exception(f\"{file_name} is not a valid Manim python script.\")\n+ module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n", "issue": "With no scene specified on CLI, manim gives false choice.\nI was running the command:\r\n`manim -pl --media_dir ~/renderings ~/dev/manim/example_scenes/basic.py ` \r\nand was greeted by this:\r\n```\r\n\r\n[19:29:42] INFO Read configuration files: config.py:416\r\n ['/home/cobordism/.local/lib/python3.8/site-packages/manim/default.cfg'] \r\n1: GraphScene\r\n2: LinearTransformationScene\r\n3: MovingCameraScene\r\n4: OpeningManimExample\r\n5: ReconfigurableScene\r\n6: SampleSpaceScene\r\n7: SpecialThreeDScene\r\n8: SquareToCircle\r\n9: ThreeDScene\r\n10: UpdatersExample\r\n11: VectorScene\r\n12: WarpSquare\r\n13: WriteStuff\r\n14: ZoomedScene\r\n\r\nChoose number corresponding to desired scene/arguments.\r\n(Use comma separated list for multiple entries)\r\nChoice(s): \r\n\r\n```\r\n\r\nClearly something is wonky here The choices should have been only the scenes from basic.py.\r\ni.e. OpeningManimExample, SquareToCircle, WarpSquare, WriteStuff, UpdatersExample.\r\n\r\n\n", "before_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . 
import constants\nfrom .logger import logger\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any([\n file_writer_config[\"preview\"],\n file_writer_config[\"show_file_in_finder\"]\n ])\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(file_path)\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, 'w')\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__))\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n print(\"%d: %s\" % (count, name))\n num_to_class[count] = scene_class\n try:\n user_input = input(constants.CHOOSE_NUMBER_MESSAGE)\n return [num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(\n constants.SCENE_NOT_FOUND_MESSAGE.format(\n scene_name\n )\n )\n if result:\n return result\n return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)\n\n\ndef get_scene_classes_from_module(module):\n if hasattr(module, \"SCENES_IN_ORDER\"):\n return module.SCENES_IN_ORDER\n else:\n return [\n member[1]\n for member in inspect.getmembers(\n module,\n lambda x: is_child_scene(x, module)\n )\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n code = sys.stdin.read()\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n module_name = re.sub(r\"\\..+$\", \"\", file_name.replace(os.sep, \".\"))\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f'{file_name} not found')\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = 
get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}], "after_files": [{"content": "import inspect\nimport os\nimport platform\nimport subprocess as sp\nimport sys\nimport re\nimport traceback\nimport importlib.util\n\nfrom .config import file_writer_config\nfrom .scene.scene import Scene\nfrom .utils.sounds import play_error_sound\nfrom .utils.sounds import play_finish_sound\nfrom . import constants\nfrom .logger import logger\n\n\ndef open_file_if_needed(file_writer):\n if file_writer_config[\"quiet\"]:\n curr_stdout = sys.stdout\n sys.stdout = open(os.devnull, \"w\")\n\n open_file = any([\n file_writer_config[\"preview\"],\n file_writer_config[\"show_file_in_finder\"]\n ])\n if open_file:\n current_os = platform.system()\n file_paths = []\n\n if file_writer_config[\"save_last_frame\"]:\n file_paths.append(file_writer.get_image_file_path())\n if file_writer_config[\"write_to_movie\"]:\n file_paths.append(file_writer.get_movie_file_path())\n\n for file_path in file_paths:\n if current_os == \"Windows\":\n os.startfile(file_path)\n else:\n commands = []\n if current_os == \"Linux\":\n commands.append(\"xdg-open\")\n elif current_os.startswith(\"CYGWIN\"):\n commands.append(\"cygstart\")\n else: # Assume macOS\n commands.append(\"open\")\n\n if file_writer_config[\"show_file_in_finder\"]:\n commands.append(\"-R\")\n\n commands.append(file_path)\n\n # commands.append(\"-g\")\n FNULL = open(os.devnull, 'w')\n sp.call(commands, stdout=FNULL, stderr=sp.STDOUT)\n FNULL.close()\n\n if file_writer_config[\"quiet\"]:\n sys.stdout.close()\n sys.stdout = curr_stdout\n\n\ndef is_child_scene(obj, module):\n return (inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__))\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n print(\"%d: %s\" % (count, name))\n num_to_class[count] = scene_class\n try:\n user_input = input(constants.CHOOSE_NUMBER_MESSAGE)\n return [num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if file_writer_config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in file_writer_config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(\n constants.SCENE_NOT_FOUND_MESSAGE.format(\n scene_name\n )\n )\n if result:\n return result\n return [scene_classes[0]] if len(scene_classes) == 1 else prompt_user_for_choice(scene_classes)\n\n\ndef get_scene_classes_from_module(module):\n if hasattr(module, \"SCENES_IN_ORDER\"):\n return 
module.SCENES_IN_ORDER\n else:\n return [\n member[1]\n for member in inspect.getmembers(\n module,\n lambda x: is_child_scene(x, module)\n )\n ]\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n code = sys.stdin.read()\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if os.path.exists(file_name):\n if file_name[-3:] != \".py\":\n raise Exception(f\"{file_name} is not a valid Manim python script.\")\n module_name = file_name[:-3].replace(os.sep, '.').split('.')[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f'{file_name} not found')\n\n\ndef main():\n module = get_module(file_writer_config[\"input_file\"])\n all_scene_classes = get_scene_classes_from_module(module)\n scene_classes_to_render = get_scenes_to_render(all_scene_classes)\n sound_on = file_writer_config[\"sound\"]\n for SceneClass in scene_classes_to_render:\n try:\n # By invoking, this renders the full scene\n scene = SceneClass()\n open_file_if_needed(scene.file_writer)\n if sound_on:\n play_finish_sound()\n except Exception:\n print(\"\\n\\n\")\n traceback.print_exc()\n print(\"\\n\\n\")\n if sound_on:\n play_error_sound()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manim/__main__.py"}]} | 1,998 | 173 |
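
The patch in the record above replaces a regex-based module-name guess with an explicit `.py` check plus a path-derived name. A short illustration of why that matters, using an assumed absolute path rather than anything taken from the record:

```python
# Illustrative comparison of the old and new module-name derivation.
# The path below is an assumed example input, not taken from the record.
import os
import re

file_name = "/home/user/manim/example_scenes/basic.py"

# Old expression: with an absolute path the leading separator becomes a leading
# dot, so the regex strips everything and returns "".  An empty module name lets
# every Scene subclass pass the `obj.__module__.startswith(module.__name__)`
# filter in is_child_scene, which is why unrelated library scenes were listed.
old_name = re.sub(r"\..+$", "", file_name.replace(os.sep, "."))  # ""

# Patched expression: drop ".py", then keep only the final path component.
new_name = file_name[:-3].replace(os.sep, ".").split(".")[-1]    # "basic"

print(repr(old_name), repr(new_name))
```
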
gh_patches_debug_28819 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1499 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add django-browser-reload
## Task
- [ ] follow the [installation instructions](https://github.com/adamchainz/django-browser-reload#installation) to enable `django-browser-reload` for this project
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/core/urls.py`
Content:
```
1 """civiwiki URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/3.2/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16
17 from core.router import CiviWikiRouter
18 from django.conf import settings
19 from django.conf.urls.static import static
20 from django.contrib import admin
21 from django.urls import include, path, re_path
22 from django.views.generic.base import RedirectView
23 from django.views.static import serve
24
25 urlpatterns = [
26 path("admin/", admin.site.urls),
27 path("api/v1/", include(CiviWikiRouter.urls)),
28 path("api/", include("threads.urls.api")),
29 path("", include("accounts.urls")),
30 path("", include("threads.urls.urls")),
31 path(
32 "inbox/notifications/",
33 include("notifications.urls", namespace="notifications"),
34 ),
35 path("favicon.ico", RedirectView.as_view(url="/static/favicon/favicon.ico")),
36 path(
37 "favicon-32x32.png",
38 RedirectView.as_view(url="/static/favicon/favicon-32x32.png"),
39 ),
40 path(
41 "apple-touch-icon.png",
42 RedirectView.as_view(url="/static/favicon/apple-touch-icon.png"),
43 ),
44 path(
45 "mstile-144x144.png",
46 RedirectView.as_view(url="/static/favicon/mstile-144x144.png"),
47 ),
48 re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
49 path("__debug__/", include("debug_toolbar.urls")),
50 ]
51
52 urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
53 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
54
```
Path: `project/core/settings.py`
Content:
```
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 # False if not in os.environ
10 DEBUG = os.getenv("DEBUG", False)
11
12 # defaults to second value if not found in os.environ
13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST")
14
15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT")
17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"]
18
19 INSTALLED_APPS = (
20 "django.contrib.admin",
21 "django.contrib.auth",
22 "django.contrib.contenttypes",
23 "django.contrib.sessions",
24 "django.contrib.messages",
25 "django.contrib.staticfiles",
26 "django_extensions",
27 "django_linear_migrations",
28 "storages",
29 "core",
30 "rest_framework",
31 "accounts.apps.AccountsConfig",
32 "threads",
33 "notifications",
34 "corsheaders",
35 "taggit",
36 "categories",
37 "notification",
38 "debug_toolbar",
39 )
40
41 MIDDLEWARE = [
42 "debug_toolbar.middleware.DebugToolbarMiddleware",
43 "corsheaders.middleware.CorsMiddleware",
44 "django.middleware.security.SecurityMiddleware",
45 "whitenoise.middleware.WhiteNoiseMiddleware",
46 "django.contrib.sessions.middleware.SessionMiddleware",
47 "django.middleware.common.CommonMiddleware",
48 "django.middleware.csrf.CsrfViewMiddleware",
49 "django.contrib.auth.middleware.AuthenticationMiddleware",
50 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
51 "django.contrib.messages.middleware.MessageMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 ]
54
55 INTERNAL_IPS = [
56 "127.0.0.1",
57 ]
58
59 CSRF_USE_SESSIONS = (
60 True # Store the CSRF token in the users session instead of in a cookie
61 )
62
63 CORS_ORIGIN_ALLOW_ALL = True
64 ROOT_URLCONF = "core.urls"
65
66 # SSL Setup
67 if DJANGO_HOST != "LOCALHOST":
68 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
69 SECURE_SSL_REDIRECT = True
70 SESSION_COOKIE_SECURE = True
71 CSRF_COOKIE_SECURE = True
72
73 # Internationalization & Localization
74 LANGUAGE_CODE = "en-us"
75 TIME_ZONE = "UTC"
76 USE_I18N = True
77 USE_TZ = True
78
79 TEMPLATES = [
80 {
81 "BACKEND": "django.template.backends.django.DjangoTemplates",
82 "DIRS": [
83 os.path.join(BASE_DIR, "threads/templates/threads"),
84 os.path.join(BASE_DIR, "accounts/templates/accounts"),
85 ], # TODO: Add non-webapp template directory
86 "APP_DIRS": True,
87 "OPTIONS": {
88 "context_processors": [
89 "django.template.context_processors.debug",
90 "django.template.context_processors.request",
91 "django.contrib.auth.context_processors.auth",
92 "django.contrib.messages.context_processors.messages",
93 ],
94 },
95 },
96 ]
97
98 WSGI_APPLICATION = "core.wsgi.application"
99
100 # Apex Contact for Production Errors
101 ADMINS = [("Development Team", "[email protected]")]
102
103 STATIC_URL = "/static/"
104 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),)
105 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
106
107 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
108 MEDIA_URL = "/media/"
109
110 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
111
112 PROFILE_IMG = {
113 "SIZE": (171, 171),
114 "THUMB_SIZE": (40, 40),
115 "WHITE_BG": (255, 255, 255),
116 }
117
118 # Use DATABASE_URL in production
119 DATABASE_URL = os.getenv("DATABASE_URL")
120
121 if DATABASE_URL is not None:
122 DATABASES = {"default": DATABASE_URL}
123 else:
124 # Default to sqlite for simplicity in development
125 DATABASES = {
126 "default": {
127 "ENGINE": "django.db.backends.sqlite3",
128 "NAME": BASE_DIR + "/" + "db.sqlite3",
129 }
130 }
131
132 # Email Backend Setup
133 if "EMAIL_HOST" not in os.environ:
134 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
135 EMAIL_HOST_USER = "[email protected]"
136 else:
137 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
138 EMAIL_HOST = os.getenv("EMAIL_HOST")
139 EMAIL_PORT = os.getenv("EMAIL_PORT")
140 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER")
141 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD")
142 EMAIL_USE_SSL = True
143 DEFAULT_FROM_EMAIL = EMAIL_HOST
144
145 # Notification API Settings
146 NOTIFICATIONS_SOFT_DELETE = True
147 NOTIFICATIONS_USE_JSONFIELD = True
148
149 # Django REST API Settings
150 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",)
151
152 if DEBUG:
153 # Browsable HTML - Enabled only in Debug mode (dev)
154 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (
155 "rest_framework.renderers.BrowsableAPIRenderer",
156 )
157
158 REST_FRAMEWORK = {
159 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
160 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES,
161 "DEFAULT_AUTHENTICATION_CLASSES": (
162 "rest_framework.authentication.BasicAuthentication",
163 "rest_framework.authentication.SessionAuthentication",
164 ),
165 }
166
167 # CORS Settings
168 CORS_ORIGIN_ALLOW_ALL = True
169
170 # Custom User model
171 AUTH_USER_MODEL = "accounts.User"
172
173 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
174
175 # Login Logout URLS
176 LOGIN_URL = "login/"
177 LOGIN_REDIRECT_URL = "/"
178 LOGOUT_REDIRECT_URL = "/"
179
180 AUTH_PASSWORD_VALIDATORS = [
181 {
182 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501
183 },
184 {
185 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
186 "OPTIONS": {
187 "min_length": 4,
188 },
189 },
190 {
191 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
192 },
193 {
194 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
195 },
196 ]
197
198 LOGGING = {
199 "version": 1,
200 "disable_existing_loggers": False,
201 "handlers": {
202 "console": {
203 "class": "logging.StreamHandler",
204 },
205 },
206 "root": {
207 "handlers": ["console"],
208 "level": "WARNING",
209 },
210 }
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/core/settings.py b/project/core/settings.py
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -36,6 +36,7 @@
"categories",
"notification",
"debug_toolbar",
+ "django_browser_reload",
)
MIDDLEWARE = [
@@ -50,6 +51,7 @@
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "django_browser_reload.middleware.BrowserReloadMiddleware",
]
INTERNAL_IPS = [
diff --git a/project/core/urls.py b/project/core/urls.py
--- a/project/core/urls.py
+++ b/project/core/urls.py
@@ -13,7 +13,6 @@
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
-
from core.router import CiviWikiRouter
from django.conf import settings
from django.conf.urls.static import static
@@ -47,6 +46,7 @@
),
re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}),
path("__debug__/", include("debug_toolbar.urls")),
+ path("__reload__/", include("django_browser_reload.urls")),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -36,6 +36,7 @@\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n+ \"django_browser_reload\",\n )\n \n MIDDLEWARE = [\n@@ -50,6 +51,7 @@\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n+ \"django_browser_reload.middleware.BrowserReloadMiddleware\",\n ]\n \n INTERNAL_IPS = [\ndiff --git a/project/core/urls.py b/project/core/urls.py\n--- a/project/core/urls.py\n+++ b/project/core/urls.py\n@@ -13,7 +13,6 @@\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n \"\"\"\n-\n from core.router import CiviWikiRouter\n from django.conf import settings\n from django.conf.urls.static import static\n@@ -47,6 +46,7 @@\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n+ path(\"__reload__/\", include(\"django_browser_reload.urls\")),\n ]\n \n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n", "issue": "add django-browser-reload\n## Task\r\n\r\n- [ ] follow the [installation instructions](https://github.com/adamchainz/django-browser-reload#installation) to enable `django-browser-reload` for this project\n", "before_files": [{"content": "\"\"\"civiwiki URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom core.router import CiviWikiRouter\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import RedirectView\nfrom django.views.static import serve\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/\", include(CiviWikiRouter.urls)),\n path(\"api/\", include(\"threads.urls.api\")),\n path(\"\", include(\"accounts.urls\")),\n path(\"\", include(\"threads.urls.urls\")),\n path(\n \"inbox/notifications/\",\n include(\"notifications.urls\", namespace=\"notifications\"),\n ),\n path(\"favicon.ico\", RedirectView.as_view(url=\"/static/favicon/favicon.ico\")),\n path(\n \"favicon-32x32.png\",\n RedirectView.as_view(url=\"/static/favicon/favicon-32x32.png\"),\n ),\n path(\n \"apple-touch-icon.png\",\n RedirectView.as_view(url=\"/static/favicon/apple-touch-icon.png\"),\n ),\n path(\n \"mstile-144x144.png\",\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "project/core/urls.py"}, {"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"django_linear_migrations\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n 
SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nPROFILE_IMG = {\n \"SIZE\": (171, 171),\n \"THUMB_SIZE\": (40, 40),\n \"WHITE_BG\": (255, 255, 255),\n}\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"WARNING\",\n },\n}\n", "path": "project/core/settings.py"}], "after_files": [{"content": "\"\"\"civiwiki URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom core.router import CiviWikiRouter\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import RedirectView\nfrom django.views.static import serve\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/\", include(CiviWikiRouter.urls)),\n path(\"api/\", include(\"threads.urls.api\")),\n path(\"\", include(\"accounts.urls\")),\n path(\"\", include(\"threads.urls.urls\")),\n path(\n \"inbox/notifications/\",\n include(\"notifications.urls\", namespace=\"notifications\"),\n ),\n path(\"favicon.ico\", RedirectView.as_view(url=\"/static/favicon/favicon.ico\")),\n path(\n \"favicon-32x32.png\",\n RedirectView.as_view(url=\"/static/favicon/favicon-32x32.png\"),\n ),\n path(\n \"apple-touch-icon.png\",\n RedirectView.as_view(url=\"/static/favicon/apple-touch-icon.png\"),\n ),\n path(\n \"mstile-144x144.png\",\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n path(\"__reload__/\", include(\"django_browser_reload.urls\")),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "project/core/urls.py"}, {"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"django_linear_migrations\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n 
\"debug_toolbar\",\n \"django_browser_reload\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django_browser_reload.middleware.BrowserReloadMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\nPROFILE_IMG = {\n \"SIZE\": (171, 171),\n \"THUMB_SIZE\": (40, 40),\n \"WHITE_BG\": (255, 255, 255),\n}\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = 
DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": \"WARNING\",\n },\n}\n", "path": "project/core/settings.py"}]} | 2,817 | 316 |
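
The three additions in the diff above (app entry, middleware, `__reload__/` URL include) mirror the installation steps linked in the issue. A hedged variant, not part of the recorded patch, gates the tooling on `DEBUG` so production settings and URLs are left untouched; it relies on the names already defined or imported in the project's settings.py and urls.py:

```python
# Sketch only: the if-guards are an assumption, not part of the recorded patch.

# project/core/settings.py  (INSTALLED_APPS is a tuple, MIDDLEWARE a list here)
if DEBUG:
    INSTALLED_APPS += ("django_browser_reload",)
    MIDDLEWARE += ["django_browser_reload.middleware.BrowserReloadMiddleware"]

# project/core/urls.py  (settings, path, include, urlpatterns are already imported)
if settings.DEBUG:
    urlpatterns += [path("__reload__/", include("django_browser_reload.urls"))]
```
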
gh_patches_debug_30888 | rasdani/github-patches | git_diff | mozilla__telemetry-analysis-service-551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EMR release form shows inactive records
The EMR release model has a column for `is_active`, but it's not being considered when querying the list of EMR releases in the form.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `atmo/clusters/queries.py`
Content:
```
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
4 from django.db import models
5
6
7 class EMRReleaseQuerySet(models.QuerySet):
8
9 def stable(self):
10 return self.filter(
11 is_experimental=False,
12 is_deprecated=False,
13 is_active=True,
14 )
15
16 def experimental(self):
17 return self.filter(
18 is_experimental=True,
19 is_active=True,
20 )
21
22 def deprecated(self):
23 return self.filter(
24 is_deprecated=True,
25 is_active=True,
26 )
27
28
29 class ClusterQuerySet(models.QuerySet):
30
31 def active(self):
32 return self.filter(
33 most_recent_status__in=self.model.ACTIVE_STATUS_LIST,
34 )
35
36 def terminated(self):
37 return self.filter(
38 most_recent_status__in=self.model.TERMINATED_STATUS_LIST,
39 )
40
41 def failed(self):
42 return self.filter(
43 most_recent_status__in=self.model.FAILED_STATUS_LIST,
44 )
45
```
Path: `atmo/clusters/forms.py`
Content:
```
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
4 from django import forms
5 from django.conf import settings
6 from django.core.urlresolvers import reverse
7
8 from . import models
9 from ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin
10 from ..keys.models import SSHKey
11
12
13 class EMRReleaseChoiceField(forms.ModelChoiceField):
14 def __init__(self, *args, **kwargs):
15 super().__init__(
16 label='EMR release',
17 queryset=models.EMRRelease.objects.all(),
18 required=True,
19 empty_label=None,
20 widget=forms.RadioSelect(attrs={
21 'required': 'required',
22 'class': 'radioset',
23 }),
24 help_text=models.Cluster.EMR_RELEASE_HELP,
25 )
26
27 def label_from_instance(self, obj):
28 label = obj.version
29 extra = []
30 if obj.is_experimental:
31 extra.append('experimental')
32 elif obj.is_deprecated:
33 extra.append('deprecated')
34 if extra:
35 label = '%s (%s)' % (label, ', '.join(extra))
36 return label
37
38
39 class NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin,
40 forms.ModelForm):
41 prefix = 'new'
42
43 identifier = forms.RegexField(
44 required=True,
45 label='Identifier',
46 regex=r'^[a-z0-9-]{1,100}$',
47 widget=forms.TextInput(attrs={
48 'pattern': r'[a-z0-9-]{1,100}',
49 'data-parsley-pattern-message': 'Identifier contains invalid characters.',
50 }),
51 help_text='A unique identifier for your cluster, visible in '
52 'the AWS management console. (Lowercase, use hyphens '
53 'instead of spaces.)'
54 )
55 size = forms.IntegerField(
56 label='Size',
57 required=True,
58 min_value=1,
59 max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],
60 widget=forms.NumberInput(attrs={
61 'min': '1',
62 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),
63 }),
64 help_text=('Number of workers to use in the cluster, between 1 and %s. '
65 'For testing or development 1 is recommended.' %
66 settings.AWS_CONFIG['MAX_CLUSTER_SIZE'])
67 )
68 lifetime = forms.IntegerField(
69 label='Lifetime',
70 required=True,
71 min_value=2,
72 max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],
73 widget=forms.NumberInput(attrs={
74 'min': '2',
75 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),
76 }),
77 help_text=('Lifetime in hours after which the cluster is automatically '
78 'terminated, between 2 and %s.' %
79 settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])
80 )
81 ssh_key = forms.ModelChoiceField(
82 label='SSH key',
83 queryset=SSHKey.objects.all(),
84 required=True,
85 empty_label=None,
86 )
87 emr_release = EMRReleaseChoiceField()
88
89 class Meta:
90 model = models.Cluster
91 fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release']
92
93 def __init__(self, *args, **kwargs):
94 super().__init__(*args, **kwargs)
95 user_sshkeys = self.created_by.created_sshkeys.all()
96 self.fields['ssh_key'].queryset = user_sshkeys.all()
97 self.fields['ssh_key'].help_text = (
98 'The SSH key to deploy to the cluster. '
99 'See <a href="%s">your keys</a> or '
100 '<a href="%s">add a new one</a>.' %
101 (reverse('keys-list'), reverse('keys-new'))
102 )
103 # if there are fewer options we just show radio select buttons
104 if user_sshkeys.count() <= 6:
105 self.fields['ssh_key'].widget = forms.RadioSelect(
106 choices=self.fields['ssh_key'].choices,
107 attrs={
108 'class': 'radioset',
109 },
110 )
111
112
113 class ExtendClusterForm(AutoClassFormMixin, forms.Form):
114 prefix = 'extend'
115 extension = forms.IntegerField(
116 label='Lifetime extension in hours',
117 required=True,
118 min_value=2,
119 max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],
120 widget=forms.NumberInput(attrs={
121 'min': '2',
122 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),
123 }),
124 help_text=("Number of hours to extend the cluster's lifetime with, between 2 and %s." %
125 settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/atmo/clusters/forms.py b/atmo/clusters/forms.py
--- a/atmo/clusters/forms.py
+++ b/atmo/clusters/forms.py
@@ -4,6 +4,7 @@
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
+from django.utils.safestring import mark_safe
from . import models
from ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin
@@ -14,7 +15,7 @@
def __init__(self, *args, **kwargs):
super().__init__(
label='EMR release',
- queryset=models.EMRRelease.objects.all(),
+ queryset=models.EMRRelease.objects.active(),
required=True,
empty_label=None,
widget=forms.RadioSelect(attrs={
@@ -28,11 +29,11 @@
label = obj.version
extra = []
if obj.is_experimental:
- extra.append('experimental')
+ extra.append('<span class="label label-info">experimental</span>')
elif obj.is_deprecated:
- extra.append('deprecated')
+ extra.append('<span class="label label-warning">deprecated</span>')
if extra:
- label = '%s (%s)' % (label, ', '.join(extra))
+ label = mark_safe('%s %s' % (label, ''.join(extra)))
return label
diff --git a/atmo/clusters/queries.py b/atmo/clusters/queries.py
--- a/atmo/clusters/queries.py
+++ b/atmo/clusters/queries.py
@@ -6,6 +6,11 @@
class EMRReleaseQuerySet(models.QuerySet):
+ def active(self):
+ return self.filter(
+ is_active=True,
+ )
+
def stable(self):
return self.filter(
is_experimental=False,
| {"golden_diff": "diff --git a/atmo/clusters/forms.py b/atmo/clusters/forms.py\n--- a/atmo/clusters/forms.py\n+++ b/atmo/clusters/forms.py\n@@ -4,6 +4,7 @@\n from django import forms\n from django.conf import settings\n from django.core.urlresolvers import reverse\n+from django.utils.safestring import mark_safe\n \n from . import models\n from ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin\n@@ -14,7 +15,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n- queryset=models.EMRRelease.objects.all(),\n+ queryset=models.EMRRelease.objects.active(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\n@@ -28,11 +29,11 @@\n label = obj.version\n extra = []\n if obj.is_experimental:\n- extra.append('experimental')\n+ extra.append('<span class=\"label label-info\">experimental</span>')\n elif obj.is_deprecated:\n- extra.append('deprecated')\n+ extra.append('<span class=\"label label-warning\">deprecated</span>')\n if extra:\n- label = '%s (%s)' % (label, ', '.join(extra))\n+ label = mark_safe('%s %s' % (label, ''.join(extra)))\n return label\n \n \ndiff --git a/atmo/clusters/queries.py b/atmo/clusters/queries.py\n--- a/atmo/clusters/queries.py\n+++ b/atmo/clusters/queries.py\n@@ -6,6 +6,11 @@\n \n class EMRReleaseQuerySet(models.QuerySet):\n \n+ def active(self):\n+ return self.filter(\n+ is_active=True,\n+ )\n+\n def stable(self):\n return self.filter(\n is_experimental=False,\n", "issue": "EMR release form shows inactive records\nThe EMR release model has a column for `is_active`, but it's not being considered when querying the list of EMR releases in the form. \n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.db import models\n\n\nclass EMRReleaseQuerySet(models.QuerySet):\n\n def stable(self):\n return self.filter(\n is_experimental=False,\n is_deprecated=False,\n is_active=True,\n )\n\n def experimental(self):\n return self.filter(\n is_experimental=True,\n is_active=True,\n )\n\n def deprecated(self):\n return self.filter(\n is_deprecated=True,\n is_active=True,\n )\n\n\nclass ClusterQuerySet(models.QuerySet):\n\n def active(self):\n return self.filter(\n most_recent_status__in=self.model.ACTIVE_STATUS_LIST,\n )\n\n def terminated(self):\n return self.filter(\n most_recent_status__in=self.model.TERMINATED_STATUS_LIST,\n )\n\n def failed(self):\n return self.filter(\n most_recent_status__in=self.model.FAILED_STATUS_LIST,\n )\n", "path": "atmo/clusters/queries.py"}, {"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\n\nfrom . 
import models\nfrom ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin\nfrom ..keys.models import SSHKey\n\n\nclass EMRReleaseChoiceField(forms.ModelChoiceField):\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n queryset=models.EMRRelease.objects.all(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\n 'required': 'required',\n 'class': 'radioset',\n }),\n help_text=models.Cluster.EMR_RELEASE_HELP,\n )\n\n def label_from_instance(self, obj):\n label = obj.version\n extra = []\n if obj.is_experimental:\n extra.append('experimental')\n elif obj.is_deprecated:\n extra.append('deprecated')\n if extra:\n label = '%s (%s)' % (label, ', '.join(extra))\n return label\n\n\nclass NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin,\n forms.ModelForm):\n prefix = 'new'\n\n identifier = forms.RegexField(\n required=True,\n label='Identifier',\n regex=r'^[a-z0-9-]{1,100}$',\n widget=forms.TextInput(attrs={\n 'pattern': r'[a-z0-9-]{1,100}',\n 'data-parsley-pattern-message': 'Identifier contains invalid characters.',\n }),\n help_text='A unique identifier for your cluster, visible in '\n 'the AWS management console. (Lowercase, use hyphens '\n 'instead of spaces.)'\n )\n size = forms.IntegerField(\n label='Size',\n required=True,\n min_value=1,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),\n }),\n help_text=('Number of workers to use in the cluster, between 1 and %s. '\n 'For testing or development 1 is recommended.' %\n settings.AWS_CONFIG['MAX_CLUSTER_SIZE'])\n )\n lifetime = forms.IntegerField(\n label='Lifetime',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=('Lifetime in hours after which the cluster is automatically '\n 'terminated, between 2 and %s.' %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n ssh_key = forms.ModelChoiceField(\n label='SSH key',\n queryset=SSHKey.objects.all(),\n required=True,\n empty_label=None,\n )\n emr_release = EMRReleaseChoiceField()\n\n class Meta:\n model = models.Cluster\n fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n user_sshkeys = self.created_by.created_sshkeys.all()\n self.fields['ssh_key'].queryset = user_sshkeys.all()\n self.fields['ssh_key'].help_text = (\n 'The SSH key to deploy to the cluster. '\n 'See <a href=\"%s\">your keys</a> or '\n '<a href=\"%s\">add a new one</a>.' 
%\n (reverse('keys-list'), reverse('keys-new'))\n )\n # if there are fewer options we just show radio select buttons\n if user_sshkeys.count() <= 6:\n self.fields['ssh_key'].widget = forms.RadioSelect(\n choices=self.fields['ssh_key'].choices,\n attrs={\n 'class': 'radioset',\n },\n )\n\n\nclass ExtendClusterForm(AutoClassFormMixin, forms.Form):\n prefix = 'extend'\n extension = forms.IntegerField(\n label='Lifetime extension in hours',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=(\"Number of hours to extend the cluster's lifetime with, between 2 and %s.\" %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n", "path": "atmo/clusters/forms.py"}], "after_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.db import models\n\n\nclass EMRReleaseQuerySet(models.QuerySet):\n\n def active(self):\n return self.filter(\n is_active=True,\n )\n\n def stable(self):\n return self.filter(\n is_experimental=False,\n is_deprecated=False,\n is_active=True,\n )\n\n def experimental(self):\n return self.filter(\n is_experimental=True,\n is_active=True,\n )\n\n def deprecated(self):\n return self.filter(\n is_deprecated=True,\n is_active=True,\n )\n\n\nclass ClusterQuerySet(models.QuerySet):\n\n def active(self):\n return self.filter(\n most_recent_status__in=self.model.ACTIVE_STATUS_LIST,\n )\n\n def terminated(self):\n return self.filter(\n most_recent_status__in=self.model.TERMINATED_STATUS_LIST,\n )\n\n def failed(self):\n return self.filter(\n most_recent_status__in=self.model.FAILED_STATUS_LIST,\n )\n", "path": "atmo/clusters/queries.py"}, {"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom . 
import models\nfrom ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin\nfrom ..keys.models import SSHKey\n\n\nclass EMRReleaseChoiceField(forms.ModelChoiceField):\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n queryset=models.EMRRelease.objects.active(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\n 'required': 'required',\n 'class': 'radioset',\n }),\n help_text=models.Cluster.EMR_RELEASE_HELP,\n )\n\n def label_from_instance(self, obj):\n label = obj.version\n extra = []\n if obj.is_experimental:\n extra.append('<span class=\"label label-info\">experimental</span>')\n elif obj.is_deprecated:\n extra.append('<span class=\"label label-warning\">deprecated</span>')\n if extra:\n label = mark_safe('%s %s' % (label, ''.join(extra)))\n return label\n\n\nclass NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin,\n forms.ModelForm):\n prefix = 'new'\n\n identifier = forms.RegexField(\n required=True,\n label='Identifier',\n regex=r'^[a-z0-9-]{1,100}$',\n widget=forms.TextInput(attrs={\n 'pattern': r'[a-z0-9-]{1,100}',\n 'data-parsley-pattern-message': 'Identifier contains invalid characters.',\n }),\n help_text='A unique identifier for your cluster, visible in '\n 'the AWS management console. (Lowercase, use hyphens '\n 'instead of spaces.)'\n )\n size = forms.IntegerField(\n label='Size',\n required=True,\n min_value=1,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),\n }),\n help_text=('Number of workers to use in the cluster, between 1 and %s. '\n 'For testing or development 1 is recommended.' %\n settings.AWS_CONFIG['MAX_CLUSTER_SIZE'])\n )\n lifetime = forms.IntegerField(\n label='Lifetime',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=('Lifetime in hours after which the cluster is automatically '\n 'terminated, between 2 and %s.' %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n ssh_key = forms.ModelChoiceField(\n label='SSH key',\n queryset=SSHKey.objects.all(),\n required=True,\n empty_label=None,\n )\n emr_release = EMRReleaseChoiceField()\n\n class Meta:\n model = models.Cluster\n fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n user_sshkeys = self.created_by.created_sshkeys.all()\n self.fields['ssh_key'].queryset = user_sshkeys.all()\n self.fields['ssh_key'].help_text = (\n 'The SSH key to deploy to the cluster. '\n 'See <a href=\"%s\">your keys</a> or '\n '<a href=\"%s\">add a new one</a>.' 
%\n (reverse('keys-list'), reverse('keys-new'))\n )\n # if there are fewer options we just show radio select buttons\n if user_sshkeys.count() <= 6:\n self.fields['ssh_key'].widget = forms.RadioSelect(\n choices=self.fields['ssh_key'].choices,\n attrs={\n 'class': 'radioset',\n },\n )\n\n\nclass ExtendClusterForm(AutoClassFormMixin, forms.Form):\n prefix = 'extend'\n extension = forms.IntegerField(\n label='Lifetime extension in hours',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=(\"Number of hours to extend the cluster's lifetime with, between 2 and %s.\" %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n", "path": "atmo/clusters/forms.py"}]} | 1,930 | 406 |
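
The diff above fixes the form in two steps: the field queryset now goes through a new `EMRReleaseQuerySet.active()` helper, and the experimental/deprecated suffixes become HTML badges wrapped in `mark_safe` so the radio labels render them. A minimal sketch of the queryset-as-manager pattern this relies on; the field list is trimmed and the manager wiring is assumed, since the project's models.py is not shown in the record:

```python
from django.db import models


class EMRReleaseQuerySet(models.QuerySet):
    def active(self):
        return self.filter(is_active=True)


class EMRRelease(models.Model):
    version = models.CharField(max_length=50)
    is_active = models.BooleanField(default=True)

    # Exposes .active() on the default manager, e.g. EMRRelease.objects.active()
    objects = EMRReleaseQuerySet.as_manager()
```

A form field built with `queryset=EMRRelease.objects.active()` then never offers inactive releases, which is the behaviour the issue asks for.
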
gh_patches_debug_17498 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3256 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError: list index out of range
https://sentry.liqd.net/sentry/meinberlin-prod/issues/828/
```
IndexError: list index out of range
(13 additional frame(s) were not displayed)
...
File "rest_framework/serializers.py", line 526, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "rest_framework/fields.py", line 1873, in to_representation
return method(value)
File "meinberlin/apps/projects/serializers.py", line 231, in get_active_phase
time_left = instance.time_left
File "django/utils/functional.py", line 80, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "adhocracy4/projects/models.py", line 433, in time_left
best_unit = time_delta_list[0]
Internal Server Error: /projekte/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projects/serializers.py`
Content:
```
1 from functools import lru_cache
2
3 from django.utils import timezone
4 from django.utils.translation import ugettext as _
5 from easy_thumbnails.files import get_thumbnailer
6 from rest_framework import serializers
7
8 from adhocracy4.phases.models import Phase
9 from adhocracy4.projects.models import Project
10 from meinberlin.apps.projects import get_project_type
11
12
13 class CommonFields:
14
15 def get_district(self, instance):
16 city_wide = _('City wide')
17 district_name = str(city_wide)
18 if instance.administrative_district:
19 district_name = instance.administrative_district.name
20 return district_name
21
22 def get_point(self, instance):
23 point = instance.point
24 if not point:
25 point = ''
26 return point
27
28 def get_organisation(self, instance):
29 return instance.organisation.name
30
31 def get_created_or_modified(self, instance):
32 if instance.modified:
33 return str(instance.modified)
34 return str(instance.created)
35
36
37 class ProjectSerializer(serializers.ModelSerializer, CommonFields):
38 type = serializers.SerializerMethodField()
39 subtype = serializers.SerializerMethodField()
40 title = serializers.SerializerMethodField()
41 url = serializers.SerializerMethodField()
42 point = serializers.SerializerMethodField()
43 point_label = serializers.SerializerMethodField()
44 cost = serializers.SerializerMethodField()
45 district = serializers.SerializerMethodField()
46 status = serializers.SerializerMethodField()
47 organisation = serializers.SerializerMethodField()
48 participation = serializers.SerializerMethodField()
49 participation_display = serializers.SerializerMethodField()
50 participation_active = serializers.SerializerMethodField()
51 participation_string = serializers.SerializerMethodField()
52 future_phase = serializers.SerializerMethodField()
53 active_phase = serializers.SerializerMethodField()
54 past_phase = serializers.SerializerMethodField()
55 tile_image = serializers.SerializerMethodField()
56 tile_image_copyright = serializers.SerializerMethodField()
57 plan_url = serializers.SerializerMethodField()
58 plan_title = serializers.SerializerMethodField()
59 published_projects_count = serializers.SerializerMethodField()
60 created_or_modified = serializers.SerializerMethodField()
61
62 def __init__(self, *args, **kwargs):
63 self.now = kwargs.pop('now')
64 super().__init__(args, kwargs)
65
66 class Meta:
67 model = Project
68 fields = ['type', 'subtype', 'title', 'url',
69 'organisation', 'tile_image',
70 'tile_image_copyright',
71 'point', 'point_label', 'cost',
72 'district', 'topics', 'is_public',
73 'status',
74 'participation_string',
75 'participation_active',
76 'participation', 'participation_display', 'description',
77 'future_phase', 'active_phase',
78 'past_phase', 'plan_url', 'plan_title',
79 'published_projects_count', 'created_or_modified']
80
81 @lru_cache(maxsize=1)
82 def _get_participation_status_project(self, instance):
83 if hasattr(instance, 'projectcontainer') and instance.projectcontainer:
84 if instance.projectcontainer.active_project_count > 0:
85 return _('running'), True
86 elif instance.projectcontainer.future_project_count > 0:
87 return _('starts in the future'), True
88 else:
89 return _('done'), False
90 else:
91 project_phases = instance.phases
92
93 if project_phases.active_phases():
94 return _('running'), True
95
96 if project_phases.future_phases():
97 try:
98 return (_('starts at {}').format
99 (project_phases.future_phases().first().
100 start_date.strftime('%d.%m.%Y')),
101 True)
102 except AttributeError:
103 return (_('starts in the future'),
104 True)
105 else:
106 return _('done'), False
107
108 def get_type(self, instance):
109 return 'project'
110
111 def get_subtype(self, instance):
112 subtype = get_project_type(instance)
113 if subtype in ('external', 'bplan'):
114 return 'external'
115 return subtype
116
117 def get_title(self, instance):
118 return instance.name
119
120 def get_url(self, instance):
121 if get_project_type(instance) in ('external', 'bplan'):
122 return instance.externalproject.url
123 return instance.get_absolute_url()
124
125 def get_tile_image(self, instance):
126 image_url = ''
127 if instance.tile_image:
128 image = get_thumbnailer(instance.tile_image)['project_tile']
129 image_url = image.url
130 elif instance.image:
131 image = get_thumbnailer(instance.image)['project_tile']
132 image_url = image.url
133 return image_url
134
135 def get_tile_image_copyright(self, instance):
136 if instance.tile_image:
137 return instance.tile_image_copyright
138 elif instance.image:
139 return instance.image_copyright
140 else:
141 return None
142
143 def get_status(self, instance):
144 project_phases = instance.phases
145 if project_phases.active_phases() or project_phases.future_phases():
146 return 0
147 return 1
148
149 def get_participation(self, instance):
150 return 0
151
152 def get_participation_display(self, instance):
153 return _('Yes')
154
155 def get_future_phase(self, instance):
156 if (instance.future_modules and
157 instance.future_modules.first().module_start):
158 return str(
159 instance.future_modules.first().module_start)
160 return False
161
162 def get_active_phase(self, instance):
163 if instance.active_phase_ends_next:
164 progress = instance.module_running_progress
165 time_left = instance.module_running_time_left
166 end_date = str(instance.running_module_ends_next.module_end)
167 return [progress, time_left, end_date]
168 return False
169
170 def get_past_phase(self, instance):
171 if (instance.past_modules and
172 instance.past_modules.first().module_end):
173 return str(
174 instance.past_modules.first().module_end)
175 return False
176
177 def get_participation_string(self, instance):
178 participation_string, participation_active = \
179 self._get_participation_status_project(instance)
180 return str(participation_string)
181
182 def get_participation_active(self, instance):
183 participation_string, participation_active = \
184 self._get_participation_status_project(instance)
185 return participation_active
186
187 def get_plan_url(self, instance):
188 if instance.plans.exists():
189 return instance.plans.first().get_absolute_url()
190 return None
191
192 def get_plan_title(self, instance):
193 if instance.plans.exists():
194 return instance.plans.first().title
195 return None
196
197 def get_published_projects_count(self, instance):
198 return 0
199
200 def get_point_label(self, instance):
201 return ''
202
203 def get_cost(self, instance):
204 return ''
205
206
207 class ActiveProjectSerializer(ProjectSerializer):
208
209 def seconds_in_units(self, seconds):
210 unit_totals = []
211
212 unit_limits = [
213 ([_('day'), _('days')], 24 * 3600),
214 ([_('hour'), _('hours')], 3600),
215 ([_('minute'), _('minutes')], 60)
216 ]
217
218 for unit_name, limit in unit_limits:
219 if seconds >= limit:
220 amount = int(float(seconds) / limit)
221 if amount > 1:
222 unit_totals.append((unit_name[1], amount))
223 else:
224 unit_totals.append((unit_name[0], amount))
225 seconds = seconds - (amount * limit)
226 return unit_totals
227
228 def get_active_phase(self, instance):
229 progress = instance.module_running_progress
230 time_left = instance.module_running_time_left
231 end_date = str(instance.running_module_ends_next.module_end)
232 return [progress, time_left, end_date]
233
234 def get_status(self, instance):
235 return 0
236
237 def get_future_phase(self, instance):
238 return False
239
240 def get_past_phase(self, instance):
241 return False
242
243 def get_participation_string(self, instance):
244 return _('running')
245
246 def get_participation_active(self, instance):
247 return True
248
249
250 class FutureProjectSerializer(ProjectSerializer):
251
252 def __init__(self, *args, **kwargs):
253 super().__init__(*args, **kwargs)
254 self._future_phases = Phase.objects\
255 .filter(start_date__gt=self.now,
256 module__is_draft=False)\
257 .order_by('start_date')
258
259 def get_active_phase(self, instance):
260 return False
261
262 def get_status(self, instance):
263 return 0
264
265 def get_future_phase(self, instance):
266 future_phase = self._future_phases\
267 .filter(module__project=instance)\
268 .first()
269 return str(future_phase.start_date)
270
271 def get_past_phase(self, instance):
272 return False
273
274 def get_participation_string(self, instance):
275 return _('starts in the future')
276
277 def get_participation_active(self, instance):
278 return True
279
280
281 class PastProjectSerializer(ProjectSerializer):
282
283 def __init__(self, *args, **kwargs):
284 super().__init__(*args, **kwargs)
285 self._past_phases = Phase.objects\
286 .filter(end_date__lt=timezone.now(),
287 module__is_draft=False)\
288 .order_by('start_date')
289
290 def get_active_phase(self, instance):
291 return False
292
293 def get_status(self, instance):
294 return 1
295
296 def get_future_phase(self, instance):
297 return False
298
299 def get_past_phase(self, instance):
300 past_phase = self._past_phases.filter(module__project=instance).first()
301 return str(past_phase.end_date)
302
303 def get_participation_string(self, instance):
304 return _('done')
305
306 def get_participation_active(self, instance):
307 return False
308
```
Path: `meinberlin/apps/plans/forms.py`
Content:
```
1 from django import forms
2 from django.conf import settings
3 from django.utils.translation import ugettext_lazy as _
4
5 from adhocracy4.dashboard.components.forms import ProjectDashboardForm
6 from adhocracy4.maps import widgets as maps_widgets
7 from adhocracy4.projects import models as project_models
8 from meinberlin.apps.contrib.widgets import Select2Widget
9
10 from . import models
11
12
13 class PlanForm(forms.ModelForm):
14
15 class Meta:
16 model = models.Plan
17 fields = [
18 'title',
19 'description_image',
20 'description_image_copyright',
21 'contact',
22 'point',
23 'point_label',
24 'district',
25 'cost',
26 'description',
27 'topics',
28 'status',
29 'participation']
30 widgets = {
31 'point': maps_widgets.MapChoosePointWidget(
32 polygon=settings.BERLIN_POLYGON)
33 }
34 error_messages = {
35 'point': {
36 'required': _('Please locate the plan on the map.')
37 }
38 }
39
40 def __init__(self, *args, **kwargs):
41 super().__init__(*args, **kwargs)
42 self.fields['district'].empty_label = _('City wide')
43
44 def save(self, commit=True):
45 plan = super().save(commit=False)
46 group = plan._get_group(plan.creator, plan.organisation)
47 plan.group = group
48 if commit:
49 plan.save()
50 return plan
51
52
53 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
54
55 widget = forms.Select
56
57 def clean(self, value):
58 if not value:
59 return super().clean([])
60 return super().clean([value])
61
62
63 class ProjectPlansDashboardForm(ProjectDashboardForm):
64 plans = CustomMultipleChoiceField(queryset=None,
65 label=_('Plans'))
66
67 class Meta:
68 model = project_models.Project
69 fields = ['plans']
70 required = False
71 widgets = {
72 'plans': Select2Widget,
73 }
74
75 def save(self, commit=False):
76 plans = self.cleaned_data['plans']
77 self.instance.plans.set(plans)
78
79 def __init__(self, *args, **kwargs):
80 super().__init__(*args, **kwargs)
81 self.initial['plans'] = self.instance.plans.all()
82 self.fields['plans'].required = False
83 self.fields['plans'].empty_label = '----------'
84 self.fields['plans'].queryset = \
85 self.instance.organisation.plan_set.all()
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py
--- a/meinberlin/apps/plans/forms.py
+++ b/meinberlin/apps/plans/forms.py
@@ -43,8 +43,9 @@
def save(self, commit=True):
plan = super().save(commit=False)
- group = plan._get_group(plan.creator, plan.organisation)
- plan.group = group
+ if not plan.group:
+ group = plan._get_group(plan.creator, plan.organisation)
+ plan.group = group
if commit:
plan.save()
return plan
diff --git a/meinberlin/apps/projects/serializers.py b/meinberlin/apps/projects/serializers.py
--- a/meinberlin/apps/projects/serializers.py
+++ b/meinberlin/apps/projects/serializers.py
@@ -212,7 +212,8 @@
unit_limits = [
([_('day'), _('days')], 24 * 3600),
([_('hour'), _('hours')], 3600),
- ([_('minute'), _('minutes')], 60)
+ ([_('minute'), _('minutes')], 60),
+ ([_('second'), _('seconds')], 1)
]
for unit_name, limit in unit_limits:
| {"golden_diff": "diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -43,8 +43,9 @@\n \n def save(self, commit=True):\n plan = super().save(commit=False)\n- group = plan._get_group(plan.creator, plan.organisation)\n- plan.group = group\n+ if not plan.group:\n+ group = plan._get_group(plan.creator, plan.organisation)\n+ plan.group = group\n if commit:\n plan.save()\n return plan\ndiff --git a/meinberlin/apps/projects/serializers.py b/meinberlin/apps/projects/serializers.py\n--- a/meinberlin/apps/projects/serializers.py\n+++ b/meinberlin/apps/projects/serializers.py\n@@ -212,7 +212,8 @@\n unit_limits = [\n ([_('day'), _('days')], 24 * 3600),\n ([_('hour'), _('hours')], 3600),\n- ([_('minute'), _('minutes')], 60)\n+ ([_('minute'), _('minutes')], 60),\n+ ([_('second'), _('seconds')], 1)\n ]\n \n for unit_name, limit in unit_limits:\n", "issue": "IndexError: list index out of range\nhttps://sentry.liqd.net/sentry/meinberlin-prod/issues/828/\n\n```\nIndexError: list index out of range\n(13 additional frame(s) were not displayed)\n...\n File \"rest_framework/serializers.py\", line 526, in to_representation\n ret[field.field_name] = field.to_representation(attribute)\n File \"rest_framework/fields.py\", line 1873, in to_representation\n return method(value)\n File \"meinberlin/apps/projects/serializers.py\", line 231, in get_active_phase\n time_left = instance.time_left\n File \"django/utils/functional.py\", line 80, in __get__\n res = instance.__dict__[self.name] = self.func(instance)\n File \"adhocracy4/projects/models.py\", line 433, in time_left\n best_unit = time_delta_list[0]\n\nInternal Server Error: /projekte/\n```\n", "before_files": [{"content": "from functools import lru_cache\n\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass CommonFields:\n\n def get_district(self, instance):\n city_wide = _('City wide')\n district_name = str(city_wide)\n if instance.administrative_district:\n district_name = instance.administrative_district.name\n return district_name\n\n def get_point(self, instance):\n point = instance.point\n if not point:\n point = ''\n return point\n\n def get_organisation(self, instance):\n return instance.organisation.name\n\n def get_created_or_modified(self, instance):\n if instance.modified:\n return str(instance.modified)\n return str(instance.created)\n\n\nclass ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n title = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n point_label = serializers.SerializerMethodField()\n cost = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n status = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n participation = serializers.SerializerMethodField()\n participation_display = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n future_phase = serializers.SerializerMethodField()\n 
active_phase = serializers.SerializerMethodField()\n past_phase = serializers.SerializerMethodField()\n tile_image = serializers.SerializerMethodField()\n tile_image_copyright = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n created_or_modified = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n self.now = kwargs.pop('now')\n super().__init__(args, kwargs)\n\n class Meta:\n model = Project\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'tile_image',\n 'tile_image_copyright',\n 'point', 'point_label', 'cost',\n 'district', 'topics', 'is_public',\n 'status',\n 'participation_string',\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n 'past_phase', 'plan_url', 'plan_title',\n 'published_projects_count', 'created_or_modified']\n\n @lru_cache(maxsize=1)\n def _get_participation_status_project(self, instance):\n if hasattr(instance, 'projectcontainer') and instance.projectcontainer:\n if instance.projectcontainer.active_project_count > 0:\n return _('running'), True\n elif instance.projectcontainer.future_project_count > 0:\n return _('starts in the future'), True\n else:\n return _('done'), False\n else:\n project_phases = instance.phases\n\n if project_phases.active_phases():\n return _('running'), True\n\n if project_phases.future_phases():\n try:\n return (_('starts at {}').format\n (project_phases.future_phases().first().\n start_date.strftime('%d.%m.%Y')),\n True)\n except AttributeError:\n return (_('starts in the future'),\n True)\n else:\n return _('done'), False\n\n def get_type(self, instance):\n return 'project'\n\n def get_subtype(self, instance):\n subtype = get_project_type(instance)\n if subtype in ('external', 'bplan'):\n return 'external'\n return subtype\n\n def get_title(self, instance):\n return instance.name\n\n def get_url(self, instance):\n if get_project_type(instance) in ('external', 'bplan'):\n return instance.externalproject.url\n return instance.get_absolute_url()\n\n def get_tile_image(self, instance):\n image_url = ''\n if instance.tile_image:\n image = get_thumbnailer(instance.tile_image)['project_tile']\n image_url = image.url\n elif instance.image:\n image = get_thumbnailer(instance.image)['project_tile']\n image_url = image.url\n return image_url\n\n def get_tile_image_copyright(self, instance):\n if instance.tile_image:\n return instance.tile_image_copyright\n elif instance.image:\n return instance.image_copyright\n else:\n return None\n\n def get_status(self, instance):\n project_phases = instance.phases\n if project_phases.active_phases() or project_phases.future_phases():\n return 0\n return 1\n\n def get_participation(self, instance):\n return 0\n\n def get_participation_display(self, instance):\n return _('Yes')\n\n def get_future_phase(self, instance):\n if (instance.future_modules and\n instance.future_modules.first().module_start):\n return str(\n instance.future_modules.first().module_start)\n return False\n\n def get_active_phase(self, instance):\n if instance.active_phase_ends_next:\n progress = instance.module_running_progress\n time_left = instance.module_running_time_left\n end_date = str(instance.running_module_ends_next.module_end)\n return [progress, time_left, end_date]\n return False\n\n def get_past_phase(self, instance):\n if (instance.past_modules and\n 
instance.past_modules.first().module_end):\n return str(\n instance.past_modules.first().module_end)\n return False\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return participation_active\n\n def get_plan_url(self, instance):\n if instance.plans.exists():\n return instance.plans.first().get_absolute_url()\n return None\n\n def get_plan_title(self, instance):\n if instance.plans.exists():\n return instance.plans.first().title\n return None\n\n def get_published_projects_count(self, instance):\n return 0\n\n def get_point_label(self, instance):\n return ''\n\n def get_cost(self, instance):\n return ''\n\n\nclass ActiveProjectSerializer(ProjectSerializer):\n\n def seconds_in_units(self, seconds):\n unit_totals = []\n\n unit_limits = [\n ([_('day'), _('days')], 24 * 3600),\n ([_('hour'), _('hours')], 3600),\n ([_('minute'), _('minutes')], 60)\n ]\n\n for unit_name, limit in unit_limits:\n if seconds >= limit:\n amount = int(float(seconds) / limit)\n if amount > 1:\n unit_totals.append((unit_name[1], amount))\n else:\n unit_totals.append((unit_name[0], amount))\n seconds = seconds - (amount * limit)\n return unit_totals\n\n def get_active_phase(self, instance):\n progress = instance.module_running_progress\n time_left = instance.module_running_time_left\n end_date = str(instance.running_module_ends_next.module_end)\n return [progress, time_left, end_date]\n\n def get_status(self, instance):\n return 0\n\n def get_future_phase(self, instance):\n return False\n\n def get_past_phase(self, instance):\n return False\n\n def get_participation_string(self, instance):\n return _('running')\n\n def get_participation_active(self, instance):\n return True\n\n\nclass FutureProjectSerializer(ProjectSerializer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._future_phases = Phase.objects\\\n .filter(start_date__gt=self.now,\n module__is_draft=False)\\\n .order_by('start_date')\n\n def get_active_phase(self, instance):\n return False\n\n def get_status(self, instance):\n return 0\n\n def get_future_phase(self, instance):\n future_phase = self._future_phases\\\n .filter(module__project=instance)\\\n .first()\n return str(future_phase.start_date)\n\n def get_past_phase(self, instance):\n return False\n\n def get_participation_string(self, instance):\n return _('starts in the future')\n\n def get_participation_active(self, instance):\n return True\n\n\nclass PastProjectSerializer(ProjectSerializer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._past_phases = Phase.objects\\\n .filter(end_date__lt=timezone.now(),\n module__is_draft=False)\\\n .order_by('start_date')\n\n def get_active_phase(self, instance):\n return False\n\n def get_status(self, instance):\n return 1\n\n def get_future_phase(self, instance):\n return False\n\n def get_past_phase(self, instance):\n past_phase = self._past_phases.filter(module__project=instance).first()\n return str(past_phase.end_date)\n\n def get_participation_string(self, instance):\n return _('done')\n\n def get_participation_active(self, instance):\n return False\n", "path": "meinberlin/apps/projects/serializers.py"}, {"content": "from django import forms\nfrom django.conf import settings\nfrom 
django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.contrib.widgets import Select2Widget\n\nfrom . import models\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'description_image_copyright',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'topics',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n def save(self, commit=True):\n plan = super().save(commit=False)\n group = plan._get_group(plan.creator, plan.organisation)\n plan.group = group\n if commit:\n plan.save()\n return plan\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.Select\n\n def clean(self, value):\n if not value:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n widgets = {\n 'plans': Select2Widget,\n }\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'].required = False\n self.fields['plans'].empty_label = '----------'\n self.fields['plans'].queryset = \\\n self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}], "after_files": [{"content": "from functools import lru_cache\n\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass CommonFields:\n\n def get_district(self, instance):\n city_wide = _('City wide')\n district_name = str(city_wide)\n if instance.administrative_district:\n district_name = instance.administrative_district.name\n return district_name\n\n def get_point(self, instance):\n point = instance.point\n if not point:\n point = ''\n return point\n\n def get_organisation(self, instance):\n return instance.organisation.name\n\n def get_created_or_modified(self, instance):\n if instance.modified:\n return str(instance.modified)\n return str(instance.created)\n\n\nclass ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n title = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n point_label = serializers.SerializerMethodField()\n cost = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n status = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n participation = 
serializers.SerializerMethodField()\n participation_display = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n future_phase = serializers.SerializerMethodField()\n active_phase = serializers.SerializerMethodField()\n past_phase = serializers.SerializerMethodField()\n tile_image = serializers.SerializerMethodField()\n tile_image_copyright = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n created_or_modified = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n self.now = kwargs.pop('now')\n super().__init__(args, kwargs)\n\n class Meta:\n model = Project\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'tile_image',\n 'tile_image_copyright',\n 'point', 'point_label', 'cost',\n 'district', 'topics', 'is_public',\n 'status',\n 'participation_string',\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n 'past_phase', 'plan_url', 'plan_title',\n 'published_projects_count', 'created_or_modified']\n\n @lru_cache(maxsize=1)\n def _get_participation_status_project(self, instance):\n if hasattr(instance, 'projectcontainer') and instance.projectcontainer:\n if instance.projectcontainer.active_project_count > 0:\n return _('running'), True\n elif instance.projectcontainer.future_project_count > 0:\n return _('starts in the future'), True\n else:\n return _('done'), False\n else:\n project_phases = instance.phases\n\n if project_phases.active_phases():\n return _('running'), True\n\n if project_phases.future_phases():\n try:\n return (_('starts at {}').format\n (project_phases.future_phases().first().\n start_date.strftime('%d.%m.%Y')),\n True)\n except AttributeError:\n return (_('starts in the future'),\n True)\n else:\n return _('done'), False\n\n def get_type(self, instance):\n return 'project'\n\n def get_subtype(self, instance):\n subtype = get_project_type(instance)\n if subtype in ('external', 'bplan'):\n return 'external'\n return subtype\n\n def get_title(self, instance):\n return instance.name\n\n def get_url(self, instance):\n if get_project_type(instance) in ('external', 'bplan'):\n return instance.externalproject.url\n return instance.get_absolute_url()\n\n def get_tile_image(self, instance):\n image_url = ''\n if instance.tile_image:\n image = get_thumbnailer(instance.tile_image)['project_tile']\n image_url = image.url\n elif instance.image:\n image = get_thumbnailer(instance.image)['project_tile']\n image_url = image.url\n return image_url\n\n def get_tile_image_copyright(self, instance):\n if instance.tile_image:\n return instance.tile_image_copyright\n elif instance.image:\n return instance.image_copyright\n else:\n return None\n\n def get_status(self, instance):\n project_phases = instance.phases\n if project_phases.active_phases() or project_phases.future_phases():\n return 0\n return 1\n\n def get_participation(self, instance):\n return 0\n\n def get_participation_display(self, instance):\n return _('Yes')\n\n def get_future_phase(self, instance):\n if (instance.future_modules and\n instance.future_modules.first().module_start):\n return str(\n instance.future_modules.first().module_start)\n return False\n\n def get_active_phase(self, instance):\n if instance.active_phase_ends_next:\n progress = 
instance.module_running_progress\n time_left = instance.module_running_time_left\n end_date = str(instance.running_module_ends_next.module_end)\n return [progress, time_left, end_date]\n return False\n\n def get_past_phase(self, instance):\n if (instance.past_modules and\n instance.past_modules.first().module_end):\n return str(\n instance.past_modules.first().module_end)\n return False\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return participation_active\n\n def get_plan_url(self, instance):\n if instance.plans.exists():\n return instance.plans.first().get_absolute_url()\n return None\n\n def get_plan_title(self, instance):\n if instance.plans.exists():\n return instance.plans.first().title\n return None\n\n def get_published_projects_count(self, instance):\n return 0\n\n def get_point_label(self, instance):\n return ''\n\n def get_cost(self, instance):\n return ''\n\n\nclass ActiveProjectSerializer(ProjectSerializer):\n\n def seconds_in_units(self, seconds):\n unit_totals = []\n\n unit_limits = [\n ([_('day'), _('days')], 24 * 3600),\n ([_('hour'), _('hours')], 3600),\n ([_('minute'), _('minutes')], 60),\n ([_('second'), _('seconds')], 1)\n ]\n\n for unit_name, limit in unit_limits:\n if seconds >= limit:\n amount = int(float(seconds) / limit)\n if amount > 1:\n unit_totals.append((unit_name[1], amount))\n else:\n unit_totals.append((unit_name[0], amount))\n seconds = seconds - (amount * limit)\n return unit_totals\n\n def get_active_phase(self, instance):\n progress = instance.module_running_progress\n time_left = instance.module_running_time_left\n end_date = str(instance.running_module_ends_next.module_end)\n return [progress, time_left, end_date]\n\n def get_status(self, instance):\n return 0\n\n def get_future_phase(self, instance):\n return False\n\n def get_past_phase(self, instance):\n return False\n\n def get_participation_string(self, instance):\n return _('running')\n\n def get_participation_active(self, instance):\n return True\n\n\nclass FutureProjectSerializer(ProjectSerializer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._future_phases = Phase.objects\\\n .filter(start_date__gt=self.now,\n module__is_draft=False)\\\n .order_by('start_date')\n\n def get_active_phase(self, instance):\n return False\n\n def get_status(self, instance):\n return 0\n\n def get_future_phase(self, instance):\n future_phase = self._future_phases\\\n .filter(module__project=instance)\\\n .first()\n return str(future_phase.start_date)\n\n def get_past_phase(self, instance):\n return False\n\n def get_participation_string(self, instance):\n return _('starts in the future')\n\n def get_participation_active(self, instance):\n return True\n\n\nclass PastProjectSerializer(ProjectSerializer):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._past_phases = Phase.objects\\\n .filter(end_date__lt=timezone.now(),\n module__is_draft=False)\\\n .order_by('start_date')\n\n def get_active_phase(self, instance):\n return False\n\n def get_status(self, instance):\n return 1\n\n def get_future_phase(self, instance):\n return False\n\n def get_past_phase(self, instance):\n past_phase = self._past_phases.filter(module__project=instance).first()\n return 
str(past_phase.end_date)\n\n def get_participation_string(self, instance):\n return _('done')\n\n def get_participation_active(self, instance):\n return False\n", "path": "meinberlin/apps/projects/serializers.py"}, {"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\nfrom meinberlin.apps.contrib.widgets import Select2Widget\n\nfrom . import models\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'description_image_copyright',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'topics',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n def save(self, commit=True):\n plan = super().save(commit=False)\n if not plan.group:\n group = plan._get_group(plan.creator, plan.organisation)\n plan.group = group\n if commit:\n plan.save()\n return plan\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.Select\n\n def clean(self, value):\n if not value:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n widgets = {\n 'plans': Select2Widget,\n }\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'].required = False\n self.fields['plans'].empty_label = '----------'\n self.fields['plans'].queryset = \\\n self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}]} | 4,018 | 300 |
gh_patches_debug_22288 | rasdani/github-patches | git_diff | dask__distributed-8381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dashboards fail with 500 status code when using `bokeh<3.3.0`
When using the latest `main` with `bokeh<3.3.0`, the dashboards fail with a 500 status code.
Scheduler traceback:
```
2023-11-30 18:00:07,300 - tornado.application - ERROR - Uncaught exception GET /status (192.168.178.45)
HTTPServerRequest(protocol='http', host='192.168.178.45:8787', method='GET', uri='/status', version='HTTP/1.1', remote_ip='192.168.178.45')
Traceback (most recent call last):
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/tornado/web.py", line 1786, in _execute
result = await result
^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/server/views/doc_handler.py", line 57, in get
resources=self.application.resources(),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/hendrikmakait/projects/dask/distributed/distributed/dashboard/core.py", line 37, in resources
return super().resources(absolute_url)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/server/tornado.py", line 621, in resources
return Resources(mode="server", root_url=root_url, path_versioner=StaticHandler.append_version)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/resources.py", line 377, in __init__
if root_url and not root_url.endswith("/"):
^^^^^^^^^^^^^^^^^
AttributeError: 'bool' object has no attribute 'endswith'
```
git bisect blames #8347
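A possible direction (untested sketch): only install the `resources()` override on bokeh versions that understand the `absolute_url`-style argument, and fall back to the plain `BokehTornado` otherwise.

```python
# Sketch: version-gate the resources() override so bokeh < 3.3.0 keeps
# its original root_url handling instead of receiving a bool.
from bokeh.server.server import BokehTornado
from packaging.version import parse as parse_version

from distributed.dashboard.utils import BOKEH_VERSION

if BOKEH_VERSION < parse_version("3.3.0"):
    DaskBokehTornado = BokehTornado
else:
    class DaskBokehTornado(BokehTornado):
        def resources(self, absolute_url=True):
            return super().resources(absolute_url)
```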
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/dashboard/core.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import warnings
5
6 from bokeh.application import Application
7 from bokeh.application.handlers.function import FunctionHandler
8 from bokeh.resources import Resources
9 from bokeh.server.server import BokehTornado
10 from bokeh.server.util import create_hosts_allowlist
11
12 import dask
13
14 from distributed.dashboard.utils import BOKEH_VERSION
15 from distributed.versions import BOKEH_REQUIREMENT
16
17 # Set `prereleases=True` to allow for use with dev versions of `bokeh`
18 if not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):
19 warnings.warn(
20 f"\nDask needs {BOKEH_REQUIREMENT} for the dashboard."
21 f"\nYou have bokeh={BOKEH_VERSION}."
22 "\nContinuing without the dashboard."
23 )
24 raise ImportError(
25 f"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}"
26 )
27
28
29 if BOKEH_VERSION.major < 3:
30 from bokeh.models import Panel as TabPanel # noqa: F401
31 else:
32 from bokeh.models import TabPanel # noqa: F401
33
34
35 class DaskBokehTornado(BokehTornado):
36 def resources(self, absolute_url: str | bool | None = True) -> Resources:
37 return super().resources(absolute_url)
38
39
40 def BokehApplication(applications, server, prefix="/", template_variables=None):
41 template_variables = template_variables or {}
42 prefix = "/" + prefix.strip("/") + "/" if prefix else "/"
43
44 extra = {"prefix": prefix, **template_variables}
45
46 funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}
47 apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}
48
49 kwargs = dask.config.get("distributed.scheduler.dashboard.bokeh-application").copy()
50 extra_websocket_origins = create_hosts_allowlist(
51 kwargs.pop("allow_websocket_origin"), server.http_server.port
52 )
53
54 return DaskBokehTornado(
55 apps,
56 prefix=prefix,
57 use_index=False,
58 extra_websocket_origins=extra_websocket_origins,
59 absolute_url="",
60 **kwargs,
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py
--- a/distributed/dashboard/core.py
+++ b/distributed/dashboard/core.py
@@ -6,8 +6,8 @@
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from bokeh.resources import Resources
-from bokeh.server.server import BokehTornado
from bokeh.server.util import create_hosts_allowlist
+from packaging.version import parse as parse_version
import dask
@@ -32,9 +32,14 @@
from bokeh.models import TabPanel # noqa: F401
-class DaskBokehTornado(BokehTornado):
- def resources(self, absolute_url: str | bool | None = True) -> Resources:
- return super().resources(absolute_url)
+if BOKEH_VERSION < parse_version("3.3.0"):
+ from bokeh.server.server import BokehTornado as DaskBokehTornado
+else:
+ from bokeh.server.server import BokehTornado
+
+ class DaskBokehTornado(BokehTornado): # type: ignore[no-redef]
+ def resources(self, absolute_url: str | bool | None = True) -> Resources:
+ return super().resources(absolute_url)
def BokehApplication(applications, server, prefix="/", template_variables=None):
| {"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -6,8 +6,8 @@\n from bokeh.application import Application\n from bokeh.application.handlers.function import FunctionHandler\n from bokeh.resources import Resources\n-from bokeh.server.server import BokehTornado\n from bokeh.server.util import create_hosts_allowlist\n+from packaging.version import parse as parse_version\n \n import dask\n \n@@ -32,9 +32,14 @@\n from bokeh.models import TabPanel # noqa: F401\n \n \n-class DaskBokehTornado(BokehTornado):\n- def resources(self, absolute_url: str | bool | None = True) -> Resources:\n- return super().resources(absolute_url)\n+if BOKEH_VERSION < parse_version(\"3.3.0\"):\n+ from bokeh.server.server import BokehTornado as DaskBokehTornado\n+else:\n+ from bokeh.server.server import BokehTornado\n+\n+ class DaskBokehTornado(BokehTornado): # type: ignore[no-redef]\n+ def resources(self, absolute_url: str | bool | None = True) -> Resources:\n+ return super().resources(absolute_url)\n \n \n def BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n", "issue": "Dashboards fail with 500 status code when using `bokeh<3.3.0`\nWhen using the latest `main` with `bokeh<3.3.0`, the dashboards fail with a 500 status code.\r\n\r\nScheduler traceback:\r\n```\r\n2023-11-30 18:00:07,300 - tornado.application - ERROR - Uncaught exception GET /status (192.168.178.45)\r\nHTTPServerRequest(protocol='http', host='192.168.178.45:8787', method='GET', uri='/status', version='HTTP/1.1', remote_ip='192.168.178.45')\r\nTraceback (most recent call last):\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/tornado/web.py\", line 1786, in _execute\r\n result = await result\r\n ^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/server/views/doc_handler.py\", line 57, in get\r\n resources=self.application.resources(),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/Users/hendrikmakait/projects/dask/distributed/distributed/dashboard/core.py\", line 37, in resources\r\n return super().resources(absolute_url)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/server/tornado.py\", line 621, in resources\r\n return Resources(mode=\"server\", root_url=root_url, path_versioner=StaticHandler.append_version)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.11/site-packages/bokeh/resources.py\", line 377, in __init__\r\n if root_url and not root_url.endswith(\"/\"):\r\n ^^^^^^^^^^^^^^^^^\r\nAttributeError: 'bool' object has no attribute 'endswith'\r\n```\r\n\r\ngit bisect blames #8347\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.resources import Resources\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not 
BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\nclass DaskBokehTornado(BokehTornado):\n def resources(self, absolute_url: str | bool | None = True) -> Resources:\n return super().resources(absolute_url)\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n absolute_url=\"\",\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.resources import Resources\nfrom bokeh.server.util import create_hosts_allowlist\nfrom packaging.version import parse as parse_version\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\nif BOKEH_VERSION < parse_version(\"3.3.0\"):\n from bokeh.server.server import BokehTornado as DaskBokehTornado\nelse:\n from bokeh.server.server import BokehTornado\n\n class DaskBokehTornado(BokehTornado): # type: ignore[no-redef]\n def resources(self, absolute_url: str | bool | None = True) -> Resources:\n return super().resources(absolute_url)\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return 
DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n absolute_url=\"\",\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}]} | 1,386 | 301 |
gh_patches_debug_33228 | rasdani/github-patches | git_diff | svthalia__concrexit-1113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clarifying text on summary reward
Hi! I know you're less productive due to Corona, but on behalf of the educacie and with approval of the board I have this two-part feature request. Thanks in advance!
### Is your feature request related to a problem? Please describe.
1) There is a rule in place that lets us reward the industrious students who write summaries and share them with others via [the Course Overview](https://thalia.nu/education/courses/). However, barely anyone knows this.
2) The feedback system for this rule is lacking, as people now just randomly get money in their bank accounts. This might be confusing.
### Describe the solution you'd like
1) I want to show a little text to everyone visiting the course overview page.
2) Adding a field in the summary upload form in which the uploader can give their phone number (or maybe just a consent button, after which we take the phone number from their profile). This way we can contact them about it! 🌈
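A rough sketch of what that could look like on the admin side (field names hypothetical; it simply copies the number from the uploader's profile so we can reach them about a possible reward):

```python
# Sketch: read-only contact fields on the summary admin form,
# filled from the uploader's profile when an existing summary is opened.
from django.forms import CharField, ModelForm

from .models import Summary


class SummaryAdminForm(ModelForm):
    phone = CharField(label="Uploader phone", disabled=True, required=False)
    email = CharField(label="Uploader email", disabled=True, required=False)

    class Meta:
        model = Summary
        fields = "__all__"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        obj = kwargs.get("instance")
        if obj:
            self.fields["phone"].initial = obj.uploader.profile.phone_number
            self.fields["email"].initial = obj.uploader.email
```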
### Motivation
It will encourage people to send in summaries and clear up the obscureness of this rule.
### Describe alternatives you've considered
Not doing 2), or giving people the option to type their own phone number instead of taking it from their account.
### Additional context
The little text mentioned before:
> You can submit your own summary! Better still, you might even get a monetary reward if your summary:
>
> - Is in the current language of the course
> - Adds significant information that was not already present in the summary overview
> - Corresponds to the current course
> - Is positively peer-reviewed by people appointed by the educacie
>
> Don’t forget to authorise us to use your phone number, so we can inform you about your possible reward.
>
> Je kan je eigen samenvatting inleveren! Beter zelfs, je kan zelfs een kleine geldelijke vergoeding krijgen als je samenvatting aan het volgende voldoet:
>
> - Hij is in de taal van de cursus
> - Hij voegt significante informatie toe die niet in eerdere samenvattingen aanwezig was
> - Hij moet overeenkomen met de inhoud van het vak
> - Hij moet positief gepeer-reviewed worden door mensen die aangewezen zijn door de educacie
>
> Vergeet ons niet toestemming te geven tot je telefoonnummer, dan kunnen we je op de hoogte houden van je mogelijke beloning.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/education/forms.py`
Content:
```
1 """The forms defined by the education package"""
2 import datetime
3
4 from django.conf import settings
5 from django.forms import (
6 ChoiceField,
7 DateField,
8 ModelChoiceField,
9 ModelForm,
10 SelectDateWidget,
11 TypedChoiceField,
12 )
13 from django.utils import timezone
14
15 from utils.snippets import datetime_to_lectureyear
16 from .models import Course, Exam, Summary
17
18
19 class AddExamForm(ModelForm):
20 """Custom form to add exams, changes the possible years of the date"""
21
22 this_year = datetime.date.today().year
23 years = list(reversed(range(this_year - 8, this_year + 1)))
24
25 exam_date = DateField(
26 widget=SelectDateWidget(years=years), initial=datetime.date.today
27 )
28 course = ModelChoiceField(
29 queryset=Course.objects.order_by("name_" + settings.LANGUAGE_CODE),
30 empty_label=None,
31 )
32 type = ChoiceField(choices=Exam.EXAM_TYPES)
33
34 class Meta:
35 model = Exam
36 fields = ("file", "course", "type", "language", "exam_date")
37
38
39 class AddSummaryForm(ModelForm):
40 """
41 Custom form to add summaries, orders courses by name and formats the
42 year as lecture years
43 """
44
45 course = ModelChoiceField(
46 queryset=Course.objects.order_by("name_" + settings.LANGUAGE_CODE),
47 empty_label=None,
48 )
49
50 this_year = datetime_to_lectureyear(timezone.now())
51 years = reversed(
52 [(x, "{} - {}".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]
53 )
54
55 year = TypedChoiceField(choices=years, coerce=int, empty_value=this_year)
56
57 class Meta:
58 model = Summary
59 fields = ("name", "year", "language", "file", "course", "author")
60
```
Path: `website/education/admin.py`
Content:
```
1 """
2 This module registers admin pages for the models
3 """
4 import csv
5
6 from django.contrib import admin
7 from django.http import HttpResponse
8 from django.utils.translation import gettext_lazy as _
9
10 from utils.translation import TranslatedModelAdmin
11
12 from . import models
13
14 admin.site.register(models.Category)
15
16
17 @admin.register(models.Course)
18 class CourseAdmin(TranslatedModelAdmin):
19 fields = (
20 "name",
21 "course_code",
22 "ec",
23 "since",
24 "until",
25 "categories",
26 "old_courses",
27 )
28 list_filter = ("categories", "ec")
29 search_fields = ("name", "course_code")
30
31
32 @admin.register(models.Exam)
33 class ExamAdmin(TranslatedModelAdmin):
34 list_display = (
35 "type",
36 "course",
37 "exam_date",
38 "uploader",
39 "accepted",
40 "language",
41 "download_count",
42 )
43 readonly_fields = ("download_count",)
44 list_filter = ("accepted", "exam_date", "type", "language")
45 search_fields = (
46 "name",
47 "uploader__first_name",
48 "uploader__last_name",
49 "course__name_nl",
50 "course__name_en",
51 )
52 actions = ["accept", "reject", "reset_download_count", "download_csv"]
53
54 def accept(self, request, queryset):
55 queryset.update(accepted=True)
56
57 accept.short_description = _("Mark exams as accepted")
58
59 def reject(self, request, queryset):
60 queryset.update(accepted=False)
61
62 reject.short_description = _("Mark exams as rejected")
63
64 def reset_download_count(self, request, queryset):
65 queryset.update(download_count=0)
66
67 reset_download_count.short_description = _(
68 "Reset the marked exams " "download count"
69 )
70
71 def download_csv(self, request, queryset):
72 opts = queryset.model._meta
73 response = HttpResponse(content_type="text/csv")
74 # force download.
75 response["Content-Disposition"] = "attachment;filename=export.csv"
76 # the csv writer
77 writer = csv.writer(response)
78 field_names = [field.name for field in opts.fields]
79 # Write a first row with header information
80 writer.writerow(field_names)
81 # Write data rows
82 for obj in queryset:
83 writer.writerow([getattr(obj, field) for field in field_names])
84 return response
85
86 download_csv.short_description = _("Download marked as csv")
87
88
89 @admin.register(models.Summary)
90 class SummaryAdmin(TranslatedModelAdmin):
91 list_display = (
92 "name",
93 "course",
94 "uploader",
95 "accepted",
96 "language",
97 "download_count",
98 )
99 readonly_fields = ("download_count",)
100 list_filter = ("accepted", "language")
101 search_fields = (
102 "name",
103 "uploader__first_name",
104 "uploader__last_name",
105 "course__name_nl",
106 "course__name_en",
107 )
108 actions = ["accept", "reject", "reset_download_count", "download_csv"]
109
110 def accept(self, request, queryset):
111 queryset.update(accepted=True)
112
113 accept.short_description = _("Mark summaries as accepted")
114
115 def reject(self, request, queryset):
116 queryset.update(accepted=False)
117
118 reject.short_description = _("Mark summaries as rejected")
119
120 def reset_download_count(self, request, queryset):
121 queryset.update(download_count=0)
122
123 reset_download_count.short_description = _(
124 "Reset the marked summaries " "download count"
125 )
126
127 def download_csv(self, request, queryset):
128 opts = queryset.model._meta
129 response = HttpResponse(content_type="text/csv")
130 # force download.
131 response["Content-Disposition"] = "attachment;filename=export.csv"
132 # the csv writer
133 writer = csv.writer(response)
134 field_names = [field.name for field in opts.fields]
135 # Write a first row with header information
136 writer.writerow(field_names)
137 # Write data rows
138 for obj in queryset:
139 writer.writerow([getattr(obj, field) for field in field_names])
140 return response
141
142 download_csv.short_description = _("Download marked as csv")
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/education/admin.py b/website/education/admin.py
--- a/website/education/admin.py
+++ b/website/education/admin.py
@@ -8,8 +8,8 @@
from django.utils.translation import gettext_lazy as _
from utils.translation import TranslatedModelAdmin
-
from . import models
+from .forms import SummaryAdminForm
admin.site.register(models.Category)
@@ -106,6 +106,7 @@
"course__name_en",
)
actions = ["accept", "reject", "reset_download_count", "download_csv"]
+ form = SummaryAdminForm
def accept(self, request, queryset):
queryset.update(accepted=True)
diff --git a/website/education/forms.py b/website/education/forms.py
--- a/website/education/forms.py
+++ b/website/education/forms.py
@@ -9,8 +9,10 @@
ModelForm,
SelectDateWidget,
TypedChoiceField,
+ CharField,
)
from django.utils import timezone
+from django.utils.translation import gettext_lazy as _
from utils.snippets import datetime_to_lectureyear
from .models import Course, Exam, Summary
@@ -57,3 +59,26 @@
class Meta:
model = Summary
fields = ("name", "year", "language", "file", "course", "author")
+
+
+class SummaryAdminForm(ModelForm):
+ """
+ Custom form for summaries so that we can show more data in the admin
+ """
+
+ def __init__(self, data=None, files=None, **kwargs):
+ super().__init__(data, files, **kwargs)
+ obj = kwargs.get("instance", None)
+ if not obj:
+ self.fields["phone"].widget = self.fields["phone"].hidden_widget()
+ self.fields["email"].widget = self.fields["email"].hidden_widget()
+ else:
+ self.fields["phone"].initial = obj.uploader.profile.phone_number
+ self.fields["email"].initial = obj.uploader.email
+
+ phone = CharField(label=_("Uploader phone"), disabled=True, required=False)
+ email = CharField(label=_("Uploader email"), disabled=True, required=False)
+
+ class Meta:
+ model = Summary
+ fields = "__all__"
| {"golden_diff": "diff --git a/website/education/admin.py b/website/education/admin.py\n--- a/website/education/admin.py\n+++ b/website/education/admin.py\n@@ -8,8 +8,8 @@\n from django.utils.translation import gettext_lazy as _\n \n from utils.translation import TranslatedModelAdmin\n-\n from . import models\n+from .forms import SummaryAdminForm\n \n admin.site.register(models.Category)\n \n@@ -106,6 +106,7 @@\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n+ form = SummaryAdminForm\n \n def accept(self, request, queryset):\n queryset.update(accepted=True)\ndiff --git a/website/education/forms.py b/website/education/forms.py\n--- a/website/education/forms.py\n+++ b/website/education/forms.py\n@@ -9,8 +9,10 @@\n ModelForm,\n SelectDateWidget,\n TypedChoiceField,\n+ CharField,\n )\n from django.utils import timezone\n+from django.utils.translation import gettext_lazy as _\n \n from utils.snippets import datetime_to_lectureyear\n from .models import Course, Exam, Summary\n@@ -57,3 +59,26 @@\n class Meta:\n model = Summary\n fields = (\"name\", \"year\", \"language\", \"file\", \"course\", \"author\")\n+\n+\n+class SummaryAdminForm(ModelForm):\n+ \"\"\"\n+ Custom form for summaries so that we can show more data in the admin\n+ \"\"\"\n+\n+ def __init__(self, data=None, files=None, **kwargs):\n+ super().__init__(data, files, **kwargs)\n+ obj = kwargs.get(\"instance\", None)\n+ if not obj:\n+ self.fields[\"phone\"].widget = self.fields[\"phone\"].hidden_widget()\n+ self.fields[\"email\"].widget = self.fields[\"email\"].hidden_widget()\n+ else:\n+ self.fields[\"phone\"].initial = obj.uploader.profile.phone_number\n+ self.fields[\"email\"].initial = obj.uploader.email\n+\n+ phone = CharField(label=_(\"Uploader phone\"), disabled=True, required=False)\n+ email = CharField(label=_(\"Uploader email\"), disabled=True, required=False)\n+\n+ class Meta:\n+ model = Summary\n+ fields = \"__all__\"\n", "issue": "Clarifying text on summary reward\nHi! I know you're less productive due to Corona, but on behalf of the educacie and with approval of the board I have this two-sided feature request. Thanks in advance!\r\n\r\n### Is your feature request related to a problem? Please describe.\r\n1) There is a rule in place that let's us reward the industrious students that write summaries and show them to others via [the Course Overview](https://thalia.nu/education/courses/). However, barely anyone knows this.\r\n\r\n2) The feedback system for this rule, as people now just randomly get money on their bankaccounts. This might be confusing.\r\n\r\n### Describe the solution you'd like\r\n1) I want to show a little text to everyone visiting the course overview page.\r\n\r\n2) Adding a field in the summary upload form in which the uploader can give his phone number. (maybe just a consent button and then take the phone number of the profile) This way we can contact them about it! \ud83c\udf08 \r\n\r\n### Motivation\r\nIt will encourage people to send in summaries and clear up the obscureness of this rule.\r\n\r\n### Describe alternatives you've considered\r\nNot do 2) or give people the option to type their own phone number instead of taking it from their account.\r\n\r\n### Additional context\r\n\r\nThe little text mentioned before:\r\n\r\n> You can submit your own summary! 
Better even, you might even get a monetary reward if your summary is:\r\n> \r\n> - In the current language of the course\r\n> - Adds significant information that was not already present in the summary overview\r\n> - Corresponds to the current course\r\n> - Is positively peer-reviewed by people appointed by the educacie\r\n> \r\n> Don\u2019t forget to authorise us to use your phone number, so we can inform you about your possible reward.\r\n\r\n> \r\n> Je kan je eigen samenvatting inleveren! Beter zelfs, je kan zelfs een kleine geldelijke vergoeding krijgen als je samenvatting aan het volgende voldoet:\r\n> \r\n> - Hij is in de taal van de cursus\r\n> - Hij voegt significante informatie toe die niet in eerdere samenvattingen aanwezig was\r\n> - Hij moet overeenkomen met de inhoud van het vak\r\n> - Hij moet positief gepeer-reviewed worden door mensen die aangewezen zijn door de educacie\r\n> \r\n> Vergeet ons niet toestemming te geven tot je telefoonnummer, dan kunnen we je op de hoogte houden van je mogelijke beloning.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"The forms defined by the education package\"\"\"\nimport datetime\n\nfrom django.conf import settings\nfrom django.forms import (\n ChoiceField,\n DateField,\n ModelChoiceField,\n ModelForm,\n SelectDateWidget,\n TypedChoiceField,\n)\nfrom django.utils import timezone\n\nfrom utils.snippets import datetime_to_lectureyear\nfrom .models import Course, Exam, Summary\n\n\nclass AddExamForm(ModelForm):\n \"\"\"Custom form to add exams, changes the possible years of the date\"\"\"\n\n this_year = datetime.date.today().year\n years = list(reversed(range(this_year - 8, this_year + 1)))\n\n exam_date = DateField(\n widget=SelectDateWidget(years=years), initial=datetime.date.today\n )\n course = ModelChoiceField(\n queryset=Course.objects.order_by(\"name_\" + settings.LANGUAGE_CODE),\n empty_label=None,\n )\n type = ChoiceField(choices=Exam.EXAM_TYPES)\n\n class Meta:\n model = Exam\n fields = (\"file\", \"course\", \"type\", \"language\", \"exam_date\")\n\n\nclass AddSummaryForm(ModelForm):\n \"\"\"\n Custom form to add summaries, orders courses by name and formats the\n year as lecture years\n \"\"\"\n\n course = ModelChoiceField(\n queryset=Course.objects.order_by(\"name_\" + settings.LANGUAGE_CODE),\n empty_label=None,\n )\n\n this_year = datetime_to_lectureyear(timezone.now())\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n year = TypedChoiceField(choices=years, coerce=int, empty_value=this_year)\n\n class Meta:\n model = Summary\n fields = (\"name\", \"year\", \"language\", \"file\", \"course\", \"author\")\n", "path": "website/education/forms.py"}, {"content": "\"\"\"\nThis module registers admin pages for the models\n\"\"\"\nimport csv\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.translation import TranslatedModelAdmin\n\nfrom . 
import models\n\nadmin.site.register(models.Category)\n\n\[email protected](models.Course)\nclass CourseAdmin(TranslatedModelAdmin):\n fields = (\n \"name\",\n \"course_code\",\n \"ec\",\n \"since\",\n \"until\",\n \"categories\",\n \"old_courses\",\n )\n list_filter = (\"categories\", \"ec\")\n search_fields = (\"name\", \"course_code\")\n\n\[email protected](models.Exam)\nclass ExamAdmin(TranslatedModelAdmin):\n list_display = (\n \"type\",\n \"course\",\n \"exam_date\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"exam_date\", \"type\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark exams as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark exams as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked exams \" \"download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n\n\[email protected](models.Summary)\nclass SummaryAdmin(TranslatedModelAdmin):\n list_display = (\n \"name\",\n \"course\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark summaries as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark summaries as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked summaries \" \"download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n", "path": 
"website/education/admin.py"}], "after_files": [{"content": "\"\"\"The forms defined by the education package\"\"\"\nimport datetime\n\nfrom django.conf import settings\nfrom django.forms import (\n ChoiceField,\n DateField,\n ModelChoiceField,\n ModelForm,\n SelectDateWidget,\n TypedChoiceField,\n CharField,\n)\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.snippets import datetime_to_lectureyear\nfrom .models import Course, Exam, Summary\n\n\nclass AddExamForm(ModelForm):\n \"\"\"Custom form to add exams, changes the possible years of the date\"\"\"\n\n this_year = datetime.date.today().year\n years = list(reversed(range(this_year - 8, this_year + 1)))\n\n exam_date = DateField(\n widget=SelectDateWidget(years=years), initial=datetime.date.today\n )\n course = ModelChoiceField(\n queryset=Course.objects.order_by(\"name_\" + settings.LANGUAGE_CODE),\n empty_label=None,\n )\n type = ChoiceField(choices=Exam.EXAM_TYPES)\n\n class Meta:\n model = Exam\n fields = (\"file\", \"course\", \"type\", \"language\", \"exam_date\")\n\n\nclass AddSummaryForm(ModelForm):\n \"\"\"\n Custom form to add summaries, orders courses by name and formats the\n year as lecture years\n \"\"\"\n\n course = ModelChoiceField(\n queryset=Course.objects.order_by(\"name_\" + settings.LANGUAGE_CODE),\n empty_label=None,\n )\n\n this_year = datetime_to_lectureyear(timezone.now())\n years = reversed(\n [(x, \"{} - {}\".format(x, x + 1)) for x in range(this_year - 20, this_year + 1)]\n )\n\n year = TypedChoiceField(choices=years, coerce=int, empty_value=this_year)\n\n class Meta:\n model = Summary\n fields = (\"name\", \"year\", \"language\", \"file\", \"course\", \"author\")\n\n\nclass SummaryAdminForm(ModelForm):\n \"\"\"\n Custom form for summaries so that we can show more data in the admin\n \"\"\"\n\n def __init__(self, data=None, files=None, **kwargs):\n super().__init__(data, files, **kwargs)\n obj = kwargs.get(\"instance\", None)\n if not obj:\n self.fields[\"phone\"].widget = self.fields[\"phone\"].hidden_widget()\n self.fields[\"email\"].widget = self.fields[\"email\"].hidden_widget()\n else:\n self.fields[\"phone\"].initial = obj.uploader.profile.phone_number\n self.fields[\"email\"].initial = obj.uploader.email\n\n phone = CharField(label=_(\"Uploader phone\"), disabled=True, required=False)\n email = CharField(label=_(\"Uploader email\"), disabled=True, required=False)\n\n class Meta:\n model = Summary\n fields = \"__all__\"\n", "path": "website/education/forms.py"}, {"content": "\"\"\"\nThis module registers admin pages for the models\n\"\"\"\nimport csv\n\nfrom django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom utils.translation import TranslatedModelAdmin\nfrom . 
import models\nfrom .forms import SummaryAdminForm\n\nadmin.site.register(models.Category)\n\n\[email protected](models.Course)\nclass CourseAdmin(TranslatedModelAdmin):\n fields = (\n \"name\",\n \"course_code\",\n \"ec\",\n \"since\",\n \"until\",\n \"categories\",\n \"old_courses\",\n )\n list_filter = (\"categories\", \"ec\")\n search_fields = (\"name\", \"course_code\")\n\n\[email protected](models.Exam)\nclass ExamAdmin(TranslatedModelAdmin):\n list_display = (\n \"type\",\n \"course\",\n \"exam_date\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"exam_date\", \"type\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark exams as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark exams as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked exams \" \"download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description = _(\"Download marked as csv\")\n\n\[email protected](models.Summary)\nclass SummaryAdmin(TranslatedModelAdmin):\n list_display = (\n \"name\",\n \"course\",\n \"uploader\",\n \"accepted\",\n \"language\",\n \"download_count\",\n )\n readonly_fields = (\"download_count\",)\n list_filter = (\"accepted\", \"language\")\n search_fields = (\n \"name\",\n \"uploader__first_name\",\n \"uploader__last_name\",\n \"course__name_nl\",\n \"course__name_en\",\n )\n actions = [\"accept\", \"reject\", \"reset_download_count\", \"download_csv\"]\n form = SummaryAdminForm\n\n def accept(self, request, queryset):\n queryset.update(accepted=True)\n\n accept.short_description = _(\"Mark summaries as accepted\")\n\n def reject(self, request, queryset):\n queryset.update(accepted=False)\n\n reject.short_description = _(\"Mark summaries as rejected\")\n\n def reset_download_count(self, request, queryset):\n queryset.update(download_count=0)\n\n reset_download_count.short_description = _(\n \"Reset the marked summaries \" \"download count\"\n )\n\n def download_csv(self, request, queryset):\n opts = queryset.model._meta\n response = HttpResponse(content_type=\"text/csv\")\n # force download.\n response[\"Content-Disposition\"] = \"attachment;filename=export.csv\"\n # the csv writer\n writer = csv.writer(response)\n field_names = [field.name for field in opts.fields]\n # Write a first row with header information\n writer.writerow(field_names)\n # Write data rows\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n return response\n\n download_csv.short_description 
= _(\"Download marked as csv\")\n", "path": "website/education/admin.py"}]} | 2,517 | 504 |
gh_patches_debug_20800 | rasdani/github-patches | git_diff | Flexget__Flexget-2264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flexget is incompatible with python 3.7
<!---
Before opening an issue, verify:
- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget
- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions
- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon
- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues
- Did you fill out the issue template as completely as possible?
The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!
--->
### Expected behaviour:
<!---
Please don't just say "it doesn't crash" or "it works". Explain what the expected result is.
--->
I run `flexget --cron execute`, and it does its job and exits silently. However, it currently fails to start because of a dependency error.
### Actual behaviour:
The program crashes during the import phase: `rpyc` releases before 4.0 import a helper named `async`, and `async` became a reserved keyword in Python 3.7, so the module no longer parses.
The actual fix is simple: bump the `rpyc` requirement to at least 4.0.0. In my own set-up this appeared to fix it, though it did complain about package versions; some plugins might be affected, but my config isn't that special.
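A quick way to confirm the root cause (illustrative snippet, not from the original report): `async` is only a hard keyword from Python 3.7 onward, which is why the `from rpyc.utils.helpers import async, ...` line in older rpyc releases stops parsing there.
```python
# Prints whether "async" is a reserved keyword on this interpreter.
# On 3.6 it is not (rpyc 3.x imports fine); on 3.7+ it is, so the same
# import line fails with the SyntaxError shown in the log below.
import keyword
import sys

print(sys.version_info[:2], keyword.iskeyword("async"))
```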
### Steps to reproduce:
- Step 1: Run flexget with Python 3.7
- Step 2: Observe crash
#### Config:
Config isn't relevant to this issue, running Python 3.7 is. Either way:
```
tasks:
Arch Linux ISOs:
rss: https://www.archlinux.org/feeds/releases/
accept_all: yes
transmission: SECRET CONFIG
schedules:
- tasks: '*'
interval:
hours: 1
```
#### Log:
```
Traceback (most recent call last):
File "/var/lib/flexget/.local/bin/flexget", line 7, in <module>
from flexget import main
File "/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/__init__.py", line 12, in <module>
from flexget.manager import Manager
File "/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/manager.py", line 38, in <module>
from flexget.ipc import IPCClient, IPCServer # noqa
File "/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/ipc.py", line 10, in <module>
import rpyc
File "/var/lib/flexget/.local/lib/python3.7/site-packages/rpyc/__init__.py", line 50
from rpyc.utils.helpers import async, timed, buffiter, BgServingThread, restricted
^
SyntaxError: invalid syntax
```
### Additional information:
- FlexGet version: 2.14.15
- Python version: 3.7.0
- Installation method: `pip install --user`
- Using daemon (yes/no): no
- OS and version: Arch Linux ARM
<!---
In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.
Please verify that the following data is present before submitting your issue:
- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.
- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).
- FlexGet version (use `flexget -V` to get it).
- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.
- Installation method (pip, git install, etc).
- Whether or not you're running FlexGet as a daemon.
- OS and version.
- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.
--->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/ipc.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5 import random
6 import string
7 import sys
8 import threading
9
10 import rpyc
11 from rpyc.utils.server import ThreadedServer
12 from terminaltables.terminal_io import terminal_size
13
14 from flexget import terminal
15 from flexget.logger import capture_output
16 from flexget.terminal import console
17 from flexget.options import get_parser
18
19 log = logging.getLogger('ipc')
20
21 # Allow some attributes from dict interface to be called over the wire
22 rpyc.core.protocol.DEFAULT_CONFIG['safe_attrs'].update(['items'])
23 rpyc.core.protocol.DEFAULT_CONFIG['allow_pickle'] = True
24
25 IPC_VERSION = 4
26 AUTH_ERROR = b'authentication error'
27 AUTH_SUCCESS = b'authentication success'
28
29
30 class RemoteStream(object):
31 """
32 Used as a filelike to stream text to remote client. If client disconnects while this is in use, an error will be
33 logged, but no exception raised.
34 """
35
36 def __init__(self, writer):
37 """
38 :param writer: A function which writes a line of text to remote client.
39 """
40 self.buffer = ''
41 self.writer = writer
42
43 def write(self, text):
44 self.buffer += text
45 if '\n' in self.buffer:
46 self.flush()
47
48 def flush(self):
49 if self.buffer is None or self.writer is None:
50 return
51 try:
52 self.writer(self.buffer, end='')
53 except EOFError:
54 self.writer = None
55 log.error('Client ended connection while still streaming output.')
56 finally:
57 self.buffer = ''
58
59
60 class DaemonService(rpyc.Service):
61 # This will be populated when the server is started
62 manager = None
63
64 def exposed_version(self):
65 return IPC_VERSION
66
67 def exposed_handle_cli(self, args):
68 args = rpyc.utils.classic.obtain(args)
69 log.verbose('Running command `%s` for client.' % ' '.join(args))
70 parser = get_parser()
71 try:
72 options = parser.parse_args(args, file=self.client_out_stream)
73 except SystemExit as e:
74 if e.code:
75 # TODO: Not sure how to properly propagate the exit code back to client
76 log.debug('Parsing cli args caused system exit with status %s.' % e.code)
77 return
78 # Saving original terminal size to restore after monkeypatch
79 original_terminal_info = terminal.terminal_info
80 # Monkeypatching terminal_size so it'll work using IPC
81 terminal.terminal_info = self._conn.root.terminal_info
82 try:
83 if not options.cron:
84 with capture_output(self.client_out_stream, loglevel=options.loglevel):
85 self.manager.handle_cli(options)
86 else:
87 self.manager.handle_cli(options)
88 finally:
89 # Restoring original terminal_size value
90 terminal.terminal_info = original_terminal_info
91
92 def client_console(self, text):
93 self._conn.root.console(text)
94
95 @property
96 def client_out_stream(self):
97 return RemoteStream(self._conn.root.console)
98
99
100 class ClientService(rpyc.Service):
101 def on_connect(self):
102 """Make sure the client version matches our own."""
103 daemon_version = self._conn.root.version()
104 if IPC_VERSION != daemon_version:
105 self._conn.close()
106 raise ValueError('Daemon is different version than client.')
107
108 def exposed_version(self):
109 return IPC_VERSION
110
111 def exposed_console(self, text, *args, **kwargs):
112 console(text, *args, **kwargs)
113
114 def exposed_terminal_info(self):
115 return {'size': terminal_size(), 'isatty': sys.stdout.isatty()}
116
117
118 class IPCServer(threading.Thread):
119 def __init__(self, manager, port=None):
120 super(IPCServer, self).__init__(name='ipc_server')
121 self.daemon = True
122 self.manager = manager
123 self.host = '127.0.0.1'
124 self.port = port or 0
125 self.password = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(15))
126 self.server = None
127
128 def authenticator(self, sock):
129 channel = rpyc.Channel(rpyc.SocketStream(sock))
130 password = channel.recv().decode('utf-8')
131 if password != self.password:
132 channel.send(AUTH_ERROR)
133 raise rpyc.utils.authenticators.AuthenticationError('Invalid password from client.')
134 channel.send(AUTH_SUCCESS)
135 return sock, self.password
136
137 def run(self):
138 # Make the rpyc logger a bit quieter when we aren't in debugging.
139 rpyc_logger = logging.getLogger('ipc.rpyc')
140 if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
141 rpyc_logger.setLevel(logging.WARNING)
142 DaemonService.manager = self.manager
143 self.server = ThreadedServer(
144 DaemonService, hostname=self.host, port=self.port, authenticator=self.authenticator, logger=rpyc_logger
145 )
146 # If we just chose an open port, write save the chosen one
147 self.port = self.server.listener.getsockname()[1]
148 self.manager.write_lock(ipc_info={'port': self.port, 'password': self.password})
149 self.server.start()
150
151 def shutdown(self):
152 if self.server:
153 self.server.close()
154
155
156 class IPCClient(object):
157 def __init__(self, port, password):
158 channel = rpyc.Channel(rpyc.SocketStream.connect('127.0.0.1', port))
159 channel.send(password.encode('utf-8'))
160 response = channel.recv()
161 if response == AUTH_ERROR:
162 # TODO: What to raise here. I guess we create a custom error
163 raise ValueError('Invalid password for daemon')
164 self.conn = rpyc.utils.factory.connect_channel(channel, service=ClientService)
165
166 def close(self):
167 self.conn.close()
168
169 def __getattr__(self, item):
170 """Proxy all other calls to the exposed daemon service."""
171 return getattr(self.conn.root, item)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/ipc.py b/flexget/ipc.py
--- a/flexget/ipc.py
+++ b/flexget/ipc.py
@@ -61,6 +61,10 @@
# This will be populated when the server is started
manager = None
+ def on_connect(self, conn):
+ self._conn = conn
+ super(DaemonService, self).on_connect(conn)
+
def exposed_version(self):
return IPC_VERSION
@@ -98,12 +102,14 @@
class ClientService(rpyc.Service):
- def on_connect(self):
+ def on_connect(self, conn):
+ self._conn = conn
"""Make sure the client version matches our own."""
daemon_version = self._conn.root.version()
if IPC_VERSION != daemon_version:
self._conn.close()
raise ValueError('Daemon is different version than client.')
+ super(ClientService, self).on_connect(conn)
def exposed_version(self):
return IPC_VERSION
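The diff above follows the rpyc 4.x API, where `Service.on_connect`/`on_disconnect` receive the connection object explicitly instead of the service relying on a pre-set `self._conn`. A minimal sketch of a 4.x-style service (hypothetical example, not FlexGet code):
```python
import rpyc
from rpyc.utils.server import ThreadedServer

class PingService(rpyc.Service):
    def on_connect(self, conn):
        # rpyc >= 4.0 passes the connection in explicitly
        self._conn = conn
        super().on_connect(conn)

    def exposed_ping(self):
        return "pong"

if __name__ == "__main__":
    ThreadedServer(PingService, port=18861).start()
```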
| {"golden_diff": "diff --git a/flexget/ipc.py b/flexget/ipc.py\n--- a/flexget/ipc.py\n+++ b/flexget/ipc.py\n@@ -61,6 +61,10 @@\n # This will be populated when the server is started\n manager = None\n \n+ def on_connect(self, conn):\n+ self._conn = conn\n+ super(DaemonService, self).on_connect(conn)\n+\n def exposed_version(self):\n return IPC_VERSION\n \n@@ -98,12 +102,14 @@\n \n \n class ClientService(rpyc.Service):\n- def on_connect(self):\n+ def on_connect(self, conn):\n+ self._conn = conn\n \"\"\"Make sure the client version matches our own.\"\"\"\n daemon_version = self._conn.root.version()\n if IPC_VERSION != daemon_version:\n self._conn.close()\n raise ValueError('Daemon is different version than client.')\n+ super(ClientService, self).on_connect(conn)\n \n def exposed_version(self):\n return IPC_VERSION\n", "issue": "Flexget is incompatible with python 3.7\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n### Expected behaviour:\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". Explain what the expected result is.\r\n--->\r\nI run `flexget --cron execute`, and it does its job and exits silently. It however fails to start because of a dependency error.\r\n\r\n\r\n### Actual behaviour:\r\n\r\nThe program crashes during the import phase, since a dependency, `rpyc`, uses a newly reserved word in Python 3.7.\r\n\r\nThe actual fix is simple: bump the `rpyc` requirement to at least 4.0.0. In my own set-up, this appeared to fix it, but it did complain about package versions. It might be that some plugins do suffer, my config isn't that special.\r\n\r\n### Steps to reproduce:\r\n- Step 1: Run flexget with Python 3.7\r\n- Stop 2: Observe crash\r\n\r\n#### Config:\r\nConfig isn't relevant to this issue, running Python 3.7 is. 
Either way:\r\n```\r\ntasks:\r\n Arch Linux ISOs:\r\n rss: https://www.archlinux.org/feeds/releases/\r\n accept_all: yes\r\n transmission: SECRET CONFIG\r\n\r\nschedules:\r\n - tasks: '*'\r\n interval:\r\n hours: 1\r\n```\r\n \r\n#### Log:\r\n```\r\nTraceback (most recent call last):\r\n File \"/var/lib/flexget/.local/bin/flexget\", line 7, in <module>\r\n from flexget import main\r\n File \"/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/__init__.py\", line 12, in <module> \r\n from flexget.manager import Manager\r\n File \"/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/manager.py\", line 38, in <module> \r\n from flexget.ipc import IPCClient, IPCServer # noqa\r\n File \"/var/lib/flexget/.local/lib/python3.7/site-packages/flexget/ipc.py\", line 10, in <module> \r\n import rpyc\r\n File \"/var/lib/flexget/.local/lib/python3.7/site-packages/rpyc/__init__.py\", line 50\r\n from rpyc.utils.helpers import async, timed, buffiter, BgServingThread, restricted\r\n ^\r\nSyntaxError: invalid syntax\r\n```\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 2.14.15\r\n- Python version: 3.7.0\r\n- Installation method: `pip install --user`\r\n- Using daemon (yes/no): no\r\n- OS and version: Arch Linux ARM\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). Note that FlexGet is not supported for use with Python v3.0, 3.1, 3.2 or 3.6.\r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport random\nimport string\nimport sys\nimport threading\n\nimport rpyc\nfrom rpyc.utils.server import ThreadedServer\nfrom terminaltables.terminal_io import terminal_size\n\nfrom flexget import terminal\nfrom flexget.logger import capture_output\nfrom flexget.terminal import console\nfrom flexget.options import get_parser\n\nlog = logging.getLogger('ipc')\n\n# Allow some attributes from dict interface to be called over the wire\nrpyc.core.protocol.DEFAULT_CONFIG['safe_attrs'].update(['items'])\nrpyc.core.protocol.DEFAULT_CONFIG['allow_pickle'] = True\n\nIPC_VERSION = 4\nAUTH_ERROR = b'authentication error'\nAUTH_SUCCESS = b'authentication success'\n\n\nclass RemoteStream(object):\n \"\"\"\n Used as a filelike to stream text to remote client. 
If client disconnects while this is in use, an error will be\n logged, but no exception raised.\n \"\"\"\n\n def __init__(self, writer):\n \"\"\"\n :param writer: A function which writes a line of text to remote client.\n \"\"\"\n self.buffer = ''\n self.writer = writer\n\n def write(self, text):\n self.buffer += text\n if '\\n' in self.buffer:\n self.flush()\n\n def flush(self):\n if self.buffer is None or self.writer is None:\n return\n try:\n self.writer(self.buffer, end='')\n except EOFError:\n self.writer = None\n log.error('Client ended connection while still streaming output.')\n finally:\n self.buffer = ''\n\n\nclass DaemonService(rpyc.Service):\n # This will be populated when the server is started\n manager = None\n\n def exposed_version(self):\n return IPC_VERSION\n\n def exposed_handle_cli(self, args):\n args = rpyc.utils.classic.obtain(args)\n log.verbose('Running command `%s` for client.' % ' '.join(args))\n parser = get_parser()\n try:\n options = parser.parse_args(args, file=self.client_out_stream)\n except SystemExit as e:\n if e.code:\n # TODO: Not sure how to properly propagate the exit code back to client\n log.debug('Parsing cli args caused system exit with status %s.' % e.code)\n return\n # Saving original terminal size to restore after monkeypatch\n original_terminal_info = terminal.terminal_info\n # Monkeypatching terminal_size so it'll work using IPC\n terminal.terminal_info = self._conn.root.terminal_info\n try:\n if not options.cron:\n with capture_output(self.client_out_stream, loglevel=options.loglevel):\n self.manager.handle_cli(options)\n else:\n self.manager.handle_cli(options)\n finally:\n # Restoring original terminal_size value\n terminal.terminal_info = original_terminal_info\n\n def client_console(self, text):\n self._conn.root.console(text)\n\n @property\n def client_out_stream(self):\n return RemoteStream(self._conn.root.console)\n\n\nclass ClientService(rpyc.Service):\n def on_connect(self):\n \"\"\"Make sure the client version matches our own.\"\"\"\n daemon_version = self._conn.root.version()\n if IPC_VERSION != daemon_version:\n self._conn.close()\n raise ValueError('Daemon is different version than client.')\n\n def exposed_version(self):\n return IPC_VERSION\n\n def exposed_console(self, text, *args, **kwargs):\n console(text, *args, **kwargs)\n\n def exposed_terminal_info(self):\n return {'size': terminal_size(), 'isatty': sys.stdout.isatty()}\n\n\nclass IPCServer(threading.Thread):\n def __init__(self, manager, port=None):\n super(IPCServer, self).__init__(name='ipc_server')\n self.daemon = True\n self.manager = manager\n self.host = '127.0.0.1'\n self.port = port or 0\n self.password = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(15))\n self.server = None\n\n def authenticator(self, sock):\n channel = rpyc.Channel(rpyc.SocketStream(sock))\n password = channel.recv().decode('utf-8')\n if password != self.password:\n channel.send(AUTH_ERROR)\n raise rpyc.utils.authenticators.AuthenticationError('Invalid password from client.')\n channel.send(AUTH_SUCCESS)\n return sock, self.password\n\n def run(self):\n # Make the rpyc logger a bit quieter when we aren't in debugging.\n rpyc_logger = logging.getLogger('ipc.rpyc')\n if logging.getLogger().getEffectiveLevel() > logging.DEBUG:\n rpyc_logger.setLevel(logging.WARNING)\n DaemonService.manager = self.manager\n self.server = ThreadedServer(\n DaemonService, hostname=self.host, port=self.port, authenticator=self.authenticator, logger=rpyc_logger\n )\n # If we just chose an 
open port, write save the chosen one\n self.port = self.server.listener.getsockname()[1]\n self.manager.write_lock(ipc_info={'port': self.port, 'password': self.password})\n self.server.start()\n\n def shutdown(self):\n if self.server:\n self.server.close()\n\n\nclass IPCClient(object):\n def __init__(self, port, password):\n channel = rpyc.Channel(rpyc.SocketStream.connect('127.0.0.1', port))\n channel.send(password.encode('utf-8'))\n response = channel.recv()\n if response == AUTH_ERROR:\n # TODO: What to raise here. I guess we create a custom error\n raise ValueError('Invalid password for daemon')\n self.conn = rpyc.utils.factory.connect_channel(channel, service=ClientService)\n\n def close(self):\n self.conn.close()\n\n def __getattr__(self, item):\n \"\"\"Proxy all other calls to the exposed daemon service.\"\"\"\n return getattr(self.conn.root, item)\n", "path": "flexget/ipc.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\nimport random\nimport string\nimport sys\nimport threading\n\nimport rpyc\nfrom rpyc.utils.server import ThreadedServer\nfrom terminaltables.terminal_io import terminal_size\n\nfrom flexget import terminal\nfrom flexget.logger import capture_output\nfrom flexget.terminal import console\nfrom flexget.options import get_parser\n\nlog = logging.getLogger('ipc')\n\n# Allow some attributes from dict interface to be called over the wire\nrpyc.core.protocol.DEFAULT_CONFIG['safe_attrs'].update(['items'])\nrpyc.core.protocol.DEFAULT_CONFIG['allow_pickle'] = True\n\nIPC_VERSION = 4\nAUTH_ERROR = b'authentication error'\nAUTH_SUCCESS = b'authentication success'\n\n\nclass RemoteStream(object):\n \"\"\"\n Used as a filelike to stream text to remote client. If client disconnects while this is in use, an error will be\n logged, but no exception raised.\n \"\"\"\n\n def __init__(self, writer):\n \"\"\"\n :param writer: A function which writes a line of text to remote client.\n \"\"\"\n self.buffer = ''\n self.writer = writer\n\n def write(self, text):\n self.buffer += text\n if '\\n' in self.buffer:\n self.flush()\n\n def flush(self):\n if self.buffer is None or self.writer is None:\n return\n try:\n self.writer(self.buffer, end='')\n except EOFError:\n self.writer = None\n log.error('Client ended connection while still streaming output.')\n finally:\n self.buffer = ''\n\n\nclass DaemonService(rpyc.Service):\n # This will be populated when the server is started\n manager = None\n\n def on_connect(self, conn):\n self._conn = conn\n super(DaemonService, self).on_connect(conn)\n\n def exposed_version(self):\n return IPC_VERSION\n\n def exposed_handle_cli(self, args):\n args = rpyc.utils.classic.obtain(args)\n log.verbose('Running command `%s` for client.' % ' '.join(args))\n parser = get_parser()\n try:\n options = parser.parse_args(args, file=self.client_out_stream)\n except SystemExit as e:\n if e.code:\n # TODO: Not sure how to properly propagate the exit code back to client\n log.debug('Parsing cli args caused system exit with status %s.' 
% e.code)\n return\n # Saving original terminal size to restore after monkeypatch\n original_terminal_info = terminal.terminal_info\n # Monkeypatching terminal_size so it'll work using IPC\n terminal.terminal_info = self._conn.root.terminal_info\n try:\n if not options.cron:\n with capture_output(self.client_out_stream, loglevel=options.loglevel):\n self.manager.handle_cli(options)\n else:\n self.manager.handle_cli(options)\n finally:\n # Restoring original terminal_size value\n terminal.terminal_info = original_terminal_info\n\n def client_console(self, text):\n self._conn.root.console(text)\n\n @property\n def client_out_stream(self):\n return RemoteStream(self._conn.root.console)\n\n\nclass ClientService(rpyc.Service):\n def on_connect(self, conn):\n self._conn = conn\n \"\"\"Make sure the client version matches our own.\"\"\"\n daemon_version = self._conn.root.version()\n if IPC_VERSION != daemon_version:\n self._conn.close()\n raise ValueError('Daemon is different version than client.')\n super(ClientService, self).on_connect(conn)\n\n def exposed_version(self):\n return IPC_VERSION\n\n def exposed_console(self, text, *args, **kwargs):\n console(text, *args, **kwargs)\n\n def exposed_terminal_info(self):\n return {'size': terminal_size(), 'isatty': sys.stdout.isatty()}\n\n\nclass IPCServer(threading.Thread):\n def __init__(self, manager, port=None):\n super(IPCServer, self).__init__(name='ipc_server')\n self.daemon = True\n self.manager = manager\n self.host = '127.0.0.1'\n self.port = port or 0\n self.password = ''.join(random.choice(string.ascii_letters + string.digits) for x in range(15))\n self.server = None\n\n def authenticator(self, sock):\n channel = rpyc.Channel(rpyc.SocketStream(sock))\n password = channel.recv().decode('utf-8')\n if password != self.password:\n channel.send(AUTH_ERROR)\n raise rpyc.utils.authenticators.AuthenticationError('Invalid password from client.')\n channel.send(AUTH_SUCCESS)\n return sock, self.password\n\n def run(self):\n # Make the rpyc logger a bit quieter when we aren't in debugging.\n rpyc_logger = logging.getLogger('ipc.rpyc')\n if logging.getLogger().getEffectiveLevel() > logging.DEBUG:\n rpyc_logger.setLevel(logging.WARNING)\n DaemonService.manager = self.manager\n self.server = ThreadedServer(\n DaemonService, hostname=self.host, port=self.port, authenticator=self.authenticator, logger=rpyc_logger\n )\n # If we just chose an open port, write save the chosen one\n self.port = self.server.listener.getsockname()[1]\n self.manager.write_lock(ipc_info={'port': self.port, 'password': self.password})\n self.server.start()\n\n def shutdown(self):\n if self.server:\n self.server.close()\n\n\nclass IPCClient(object):\n def __init__(self, port, password):\n channel = rpyc.Channel(rpyc.SocketStream.connect('127.0.0.1', port))\n channel.send(password.encode('utf-8'))\n response = channel.recv()\n if response == AUTH_ERROR:\n # TODO: What to raise here. I guess we create a custom error\n raise ValueError('Invalid password for daemon')\n self.conn = rpyc.utils.factory.connect_channel(channel, service=ClientService)\n\n def close(self):\n self.conn.close()\n\n def __getattr__(self, item):\n \"\"\"Proxy all other calls to the exposed daemon service.\"\"\"\n return getattr(self.conn.root, item)\n", "path": "flexget/ipc.py"}]} | 2,995 | 227 |
gh_patches_debug_23710 | rasdani/github-patches | git_diff | mindsdb__mindsdb-2704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Endpoint to return handler's icons
At the moment we return icons for handlers via the general `GET /handlers` route. Icons are returned as SVG or base64, which is not efficient. We need a new endpoint to return a handler's icon:
`GET /handlers/{name}/icon/{icon_file_name}`
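For illustration, such a route could serve the icon file straight from disk with Flask's `send_file`; the sketch below reuses the `ns_conf`/`Resource` objects from the existing `handlers.py` shown further down, but the directory layout and lookup logic are assumptions, not the actual MindsDB implementation.
```python
# Sketch only: the icon lookup and path layout are assumed, not taken from MindsDB.
from pathlib import Path
from flask import abort, send_file
from flask_restx import Resource

@ns_conf.route('/<handler_name>/icon/<icon_file_name>')
class HandlerIcon(Resource):
    @ns_conf.param('handler_name', 'Handler name')
    def get(self, handler_name, icon_file_name):
        icon_path = Path('handlers') / handler_name / icon_file_name  # assumed layout
        if not icon_path.is_file():
            return abort(404)
        return send_file(icon_path)
```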
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/api/http/namespaces/handlers.py`
Content:
```
1 from flask import request
2 from flask_restx import Resource
3
4 from mindsdb.api.http.utils import http_error
5 from mindsdb.api.http.namespaces.configs.handlers import ns_conf
6 from mindsdb.integrations.utilities.install import install_dependencies
7
8
9 @ns_conf.route('/')
10 class HandlersList(Resource):
11 @ns_conf.doc('handlers_list')
12 def get(self):
13 '''List all db handlers'''
14 handlers = request.integration_controller.get_handlers_import_status()
15 result = []
16 for handler_type, handler_meta in handlers.items():
17 row = {'name': handler_type}
18 row.update(handler_meta)
19 result.append(row)
20 return result
21
22
23 @ns_conf.route('/<handler_name>/install')
24 class InstallDependencies(Resource):
25 @ns_conf.param('handler_name', 'Handler name')
26 def post(self, handler_name):
27 handler_import_status = request.integration_controller.get_handlers_import_status()
28 if handler_name not in handler_import_status:
29 return f'Unkown handler: {handler_name}', 400
30
31 if handler_import_status[handler_name].get('import', {}).get('success', False) is True:
32 return 'Installed', 200
33
34 handler_meta = handler_import_status[handler_name]
35
36 dependencies = handler_meta['import']['dependencies']
37 if len(dependencies) == 0:
38 return 'Installed', 200
39
40 result = install_dependencies(dependencies)
41
42 # reload it if any result, so we can get new error message
43 request.integration_controller.reload_handler_module(handler_name)
44 if result.get('success') is True:
45 return '', 200
46 return http_error(
47 500,
48 'Failed to install dependency',
49 result.get('error_message', 'unknown error')
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/api/http/namespaces/handlers.py b/mindsdb/api/http/namespaces/handlers.py
--- a/mindsdb/api/http/namespaces/handlers.py
+++ b/mindsdb/api/http/namespaces/handlers.py
@@ -1,4 +1,8 @@
-from flask import request
+import os
+import importlib
+from pathlib import Path
+
+from flask import request, send_file, abort
from flask_restx import Resource
from mindsdb.api.http.utils import http_error
@@ -20,6 +24,24 @@
return result
+@ns_conf.route('/<handler_name>/icon')
+class HandlerIcon(Resource):
+ @ns_conf.param('handler_name', 'Handler name')
+ def get(self, handler_name):
+ try:
+ handlers_import_status = request.integration_controller.get_handlers_import_status()
+ icon_name = handlers_import_status[handler_name]['icon']['name']
+ handler_folder = handlers_import_status[handler_name]['import']['folder']
+ mindsdb_path = Path(importlib.util.find_spec('mindsdb').origin).parent
+ icon_path = mindsdb_path.joinpath('integrations/handlers').joinpath(handler_folder).joinpath(icon_name)
+ if icon_path.is_absolute() is False:
+ icon_path = Path(os.getcwd()).joinpath(icon_path)
+ except Exception:
+ return abort(404)
+ else:
+ return send_file(icon_path)
+
+
@ns_conf.route('/<handler_name>/install')
class InstallDependencies(Resource):
@ns_conf.param('handler_name', 'Handler name')
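Usage note: once such a route exists, a client can fetch the icon bytes directly instead of decoding base64 out of the `GET /handlers` payload. The host, port, URL prefix and handler name below are assumptions for the example, not guaranteed defaults.
```python
import requests

# Adjust host/port/prefix and handler name to your deployment.
resp = requests.get("http://127.0.0.1:47334/api/handlers/postgres/icon")
resp.raise_for_status()
with open("postgres-icon.svg", "wb") as f:
    f.write(resp.content)
```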
| {"golden_diff": "diff --git a/mindsdb/api/http/namespaces/handlers.py b/mindsdb/api/http/namespaces/handlers.py\n--- a/mindsdb/api/http/namespaces/handlers.py\n+++ b/mindsdb/api/http/namespaces/handlers.py\n@@ -1,4 +1,8 @@\n-from flask import request\n+import os\n+import importlib\n+from pathlib import Path\n+\n+from flask import request, send_file, abort\n from flask_restx import Resource\n \n from mindsdb.api.http.utils import http_error\n@@ -20,6 +24,24 @@\n return result\n \n \n+@ns_conf.route('/<handler_name>/icon')\n+class HandlerIcon(Resource):\n+ @ns_conf.param('handler_name', 'Handler name')\n+ def get(self, handler_name):\n+ try:\n+ handlers_import_status = request.integration_controller.get_handlers_import_status()\n+ icon_name = handlers_import_status[handler_name]['icon']['name']\n+ handler_folder = handlers_import_status[handler_name]['import']['folder']\n+ mindsdb_path = Path(importlib.util.find_spec('mindsdb').origin).parent\n+ icon_path = mindsdb_path.joinpath('integrations/handlers').joinpath(handler_folder).joinpath(icon_name)\n+ if icon_path.is_absolute() is False:\n+ icon_path = Path(os.getcwd()).joinpath(icon_path)\n+ except Exception:\n+ return abort(404)\n+ else:\n+ return send_file(icon_path)\n+\n+\n @ns_conf.route('/<handler_name>/install')\n class InstallDependencies(Resource):\n @ns_conf.param('handler_name', 'Handler name')\n", "issue": "Endpoint to return handler's icons\nAt the moment we return icons for handlers by general `GET /handlers` route. Icons are return in svg or base64, which is not effective. We need new endpoint to return handler icon:\r\n`GET /handlers/{name}/icon/{icon_file_name}`\r\n\n", "before_files": [{"content": "from flask import request\nfrom flask_restx import Resource\n\nfrom mindsdb.api.http.utils import http_error\nfrom mindsdb.api.http.namespaces.configs.handlers import ns_conf\nfrom mindsdb.integrations.utilities.install import install_dependencies\n\n\n@ns_conf.route('/')\nclass HandlersList(Resource):\n @ns_conf.doc('handlers_list')\n def get(self):\n '''List all db handlers'''\n handlers = request.integration_controller.get_handlers_import_status()\n result = []\n for handler_type, handler_meta in handlers.items():\n row = {'name': handler_type}\n row.update(handler_meta)\n result.append(row)\n return result\n\n\n@ns_conf.route('/<handler_name>/install')\nclass InstallDependencies(Resource):\n @ns_conf.param('handler_name', 'Handler name')\n def post(self, handler_name):\n handler_import_status = request.integration_controller.get_handlers_import_status()\n if handler_name not in handler_import_status:\n return f'Unkown handler: {handler_name}', 400\n\n if handler_import_status[handler_name].get('import', {}).get('success', False) is True:\n return 'Installed', 200\n\n handler_meta = handler_import_status[handler_name]\n\n dependencies = handler_meta['import']['dependencies']\n if len(dependencies) == 0:\n return 'Installed', 200\n\n result = install_dependencies(dependencies)\n\n # reload it if any result, so we can get new error message\n request.integration_controller.reload_handler_module(handler_name)\n if result.get('success') is True:\n return '', 200\n return http_error(\n 500,\n 'Failed to install dependency',\n result.get('error_message', 'unknown error')\n )\n", "path": "mindsdb/api/http/namespaces/handlers.py"}], "after_files": [{"content": "import os\nimport importlib\nfrom pathlib import Path\n\nfrom flask import request, send_file, abort\nfrom flask_restx import Resource\n\nfrom mindsdb.api.http.utils import 
http_error\nfrom mindsdb.api.http.namespaces.configs.handlers import ns_conf\nfrom mindsdb.integrations.utilities.install import install_dependencies\n\n\n@ns_conf.route('/')\nclass HandlersList(Resource):\n @ns_conf.doc('handlers_list')\n def get(self):\n '''List all db handlers'''\n handlers = request.integration_controller.get_handlers_import_status()\n result = []\n for handler_type, handler_meta in handlers.items():\n row = {'name': handler_type}\n row.update(handler_meta)\n result.append(row)\n return result\n\n\n@ns_conf.route('/<handler_name>/icon')\nclass HandlerIcon(Resource):\n @ns_conf.param('handler_name', 'Handler name')\n def get(self, handler_name):\n try:\n handlers_import_status = request.integration_controller.get_handlers_import_status()\n icon_name = handlers_import_status[handler_name]['icon']['name']\n handler_folder = handlers_import_status[handler_name]['import']['folder']\n mindsdb_path = Path(importlib.util.find_spec('mindsdb').origin).parent\n icon_path = mindsdb_path.joinpath('integrations/handlers').joinpath(handler_folder).joinpath(icon_name)\n if icon_path.is_absolute() is False:\n icon_path = Path(os.getcwd()).joinpath(icon_path)\n except Exception:\n return abort(404)\n else:\n return send_file(icon_path)\n\n\n@ns_conf.route('/<handler_name>/install')\nclass InstallDependencies(Resource):\n @ns_conf.param('handler_name', 'Handler name')\n def post(self, handler_name):\n handler_import_status = request.integration_controller.get_handlers_import_status()\n if handler_name not in handler_import_status:\n return f'Unkown handler: {handler_name}', 400\n\n if handler_import_status[handler_name].get('import', {}).get('success', False) is True:\n return 'Installed', 200\n\n handler_meta = handler_import_status[handler_name]\n\n dependencies = handler_meta['import']['dependencies']\n if len(dependencies) == 0:\n return 'Installed', 200\n\n result = install_dependencies(dependencies)\n\n # reload it if any result, so we can get new error message\n request.integration_controller.reload_handler_module(handler_name)\n if result.get('success') is True:\n return '', 200\n return http_error(\n 500,\n 'Failed to install dependency',\n result.get('error_message', 'unknown error')\n )\n", "path": "mindsdb/api/http/namespaces/handlers.py"}]} | 796 | 358 |
gh_patches_debug_34423 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-34 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subscriptions are returning data not wrapped into a field
It should return:
```json
{
"data": {
"time": 1
}
}
```
Repro:
```python
from __future__ import annotations
import asyncio
import strawberry
@strawberry.type
class Query:
hello: str = "demo"
@strawberry.type
class Subscription:
@strawberry.subscription
async def time(self, info) -> int:
i = 0
while True:
yield i
i += 1
await asyncio.sleep(1)
schema = strawberry.Schema(
query=Query, subscription=Subscription
)
```
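A plausible reading of the repro (a sketch, not necessarily the exact patch that landed): in graphql-core a subscription field needs both a `subscribe` callable returning the async iterator and a `resolve` callable mapping each yielded payload to the field value, while `strawberry/field.py` below only wires up `subscribe`, so the raw payload escapes un-wrapped. One possible fix:
```python
# Sketch against the kwargs block in strawberry/field.py shown below;
# the lambda is illustrative, not the verified upstream fix.
if is_subscription:
    kwargs = {
        "subscribe": resolver,                          # yields event payloads
        "resolve": lambda event, info, **args: event,   # wraps payload under the field
    }
else:
    kwargs = {"resolve": resolver}
```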
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/field.py`
Content:
```
1 from typing import get_type_hints
2
3 from graphql import GraphQLField
4
5 from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT
6 from .exceptions import MissingArgumentsAnnotationsError, MissingReturnAnnotationError
7 from .type_converter import get_graphql_type_for_annotation
8 from .utils.dict_to_type import dict_to_type
9 from .utils.inspect import get_func_args
10
11
12 def field(wrap, *, is_subscription=False):
13 setattr(wrap, IS_STRAWBERRY_FIELD, True)
14 annotations = get_type_hints(wrap)
15
16 name = wrap.__name__
17
18 if "return" not in annotations:
19 raise MissingReturnAnnotationError(name)
20
21 field_type = get_graphql_type_for_annotation(annotations["return"], name)
22
23 function_arguments = set(get_func_args(wrap)) - {"self", "info"}
24
25 arguments_annotations = {
26 key: value
27 for key, value in annotations.items()
28 if key not in ["info", "return"]
29 }
30
31 annotated_function_arguments = set(arguments_annotations.keys())
32 arguments_missing_annotations = function_arguments - annotated_function_arguments
33
34 if len(arguments_missing_annotations) > 0:
35 raise MissingArgumentsAnnotationsError(name, arguments_missing_annotations)
36
37 arguments = {
38 name: get_graphql_type_for_annotation(annotation, name)
39 for name, annotation in arguments_annotations.items()
40 }
41
42 def convert_args(args):
43 converted_args = {}
44
45 for key, value in args.items():
46 if getattr(arguments_annotations[key], IS_STRAWBERRY_INPUT):
47 converted_args[key] = dict_to_type(value, arguments_annotations[key])
48 else:
49 converted_args[key] = value
50
51 return converted_args
52
53 def resolver(source, info, **args):
54 args = convert_args(args)
55
56 return wrap(source, info, **args)
57
58 if is_subscription:
59 kwargs = {"subscribe": resolver}
60 else:
61 kwargs = {"resolve": resolver}
62
63 wrap.field = GraphQLField(field_type, args=arguments, **kwargs)
64 return wrap
65
```
Path: `strawberry/type_converter.py`
Content:
```
1 from graphql import (
2 GraphQLBoolean,
3 GraphQLFloat,
4 GraphQLID,
5 GraphQLInt,
6 GraphQLList,
7 GraphQLNonNull,
8 GraphQLString,
9 GraphQLUnionType,
10 )
11
12 from .scalars import ID
13
14
15 REGISTRY = {
16 str: GraphQLString,
17 int: GraphQLInt,
18 float: GraphQLFloat,
19 bool: GraphQLBoolean,
20 ID: GraphQLID,
21 }
22
23
24 # TODO: make so that we don't pass force optional
25 # we use that when trying to get the type for a
26 # option field (which can either be a scalar or an object type)
27 def get_graphql_type_for_annotation(
28 annotation, field_name: str, force_optional: bool = False
29 ):
30 # TODO: this might lead to issues with types that have a field value
31 is_optional = force_optional
32
33 if hasattr(annotation, "field"):
34 graphql_type = annotation.field
35 else:
36 annotation_name = getattr(annotation, "_name", None)
37
38 if annotation_name == "List":
39 list_of_type = get_graphql_type_for_annotation(
40 annotation.__args__[0], field_name
41 )
42
43 return GraphQLList(list_of_type)
44
45 # for some reason _name is None for Optional and Union types, so we check if we
46 # have __args__ populated, there might be some edge cases where __args__ is
47 # populated but the type is not an Union, like in the above case with Lists
48 if hasattr(annotation, "__args__"):
49 types = annotation.__args__
50 non_none_types = [x for x in types if x != None.__class__] # noqa:E721
51
52 # optionals are represented as Union[type, None]
53 if len(non_none_types) == 1:
54 is_optional = True
55 graphql_type = get_graphql_type_for_annotation(
56 non_none_types[0], field_name, force_optional=True
57 )
58 else:
59 is_optional = None.__class__ in types
60
61 # TODO: union types don't work with scalar types
62 # so we want to return a nice error
63 # also we want to make sure we have been passed
64 # strawberry types
65 graphql_type = GraphQLUnionType(
66 field_name, [type.field for type in types]
67 )
68 else:
69 graphql_type = REGISTRY.get(annotation)
70
71 if not graphql_type:
72 raise ValueError(f"Unable to get GraphQL type for {annotation}")
73
74 if is_optional:
75 return graphql_type
76
77 return GraphQLNonNull(graphql_type)
78
```
Path: `strawberry/contrib/starlette/app/graphql_ws_app.py`
Content:
```
1 import functools
2 import typing
3
4 # from graphql.error import GraphQLError, format_error as format_graphql_error
5 from graphql.language import parse
6 from graphql.subscription import subscribe
7 from starlette.types import ASGIInstance, Receive, Scope, Send
8 from starlette.websockets import WebSocket
9
10 from .base import BaseApp
11
12
13 class GraphQLSubscriptionApp(BaseApp):
14 def __call__(self, scope: Scope) -> ASGIInstance:
15 return functools.partial(self.asgi, scope=scope)
16
17 async def execute(self, query, variables=None, context=None, operation_name=None):
18 return await subscribe(
19 self.schema,
20 parse(query),
21 variable_values=variables,
22 operation_name=operation_name,
23 context_value=context,
24 )
25
26 async def _send_message(
27 self,
28 websocket: WebSocket,
29 type_: str,
30 payload: typing.Any = None,
31 id_: str = None,
32 ) -> None:
33 data = {"type": type_}
34
35 if id_ is not None:
36 data["id"] = id_
37
38 if payload is not None:
39 data["payload"] = payload
40
41 return await websocket.send_json(data)
42
43 async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:
44 assert scope["type"] == "websocket"
45
46 websocket = WebSocket(scope, receive=receive, send=send)
47 await websocket.accept(subprotocol="graphql-ws")
48 await self._send_message(websocket, "connection_ack")
49
50 # TODO: we should check that this is a proper connection init message
51 await websocket.receive_json()
52 data = await websocket.receive_json()
53
54 id_ = data.get("id", "1")
55 payload = data.get("payload", {})
56
57 data = await self.execute(
58 payload["query"],
59 payload["variables"],
60 operation_name=payload["operationName"],
61 )
62
63 async for result in data.iterator:
64 await self._send_message(websocket, "data", {"data": result}, id_)
65
66 await self._send_message(websocket, "complete")
67 await websocket.close()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/contrib/starlette/app/graphql_ws_app.py b/strawberry/contrib/starlette/app/graphql_ws_app.py
--- a/strawberry/contrib/starlette/app/graphql_ws_app.py
+++ b/strawberry/contrib/starlette/app/graphql_ws_app.py
@@ -60,8 +60,10 @@
operation_name=payload["operationName"],
)
- async for result in data.iterator:
- await self._send_message(websocket, "data", {"data": result}, id_)
+ async for result in data:
+ # TODO: send errors if any
+
+ await self._send_message(websocket, "data", {"data": result.data}, id_)
await self._send_message(websocket, "complete")
await websocket.close()
diff --git a/strawberry/field.py b/strawberry/field.py
--- a/strawberry/field.py
+++ b/strawberry/field.py
@@ -56,7 +56,11 @@
return wrap(source, info, **args)
if is_subscription:
- kwargs = {"subscribe": resolver}
+
+ def _resolve(event, info):
+ return event
+
+ kwargs = {"subscribe": resolver, "resolve": _resolve}
else:
kwargs = {"resolve": resolver}
diff --git a/strawberry/type_converter.py b/strawberry/type_converter.py
--- a/strawberry/type_converter.py
+++ b/strawberry/type_converter.py
@@ -1,3 +1,5 @@
+from collections.abc import AsyncGenerator
+
from graphql import (
GraphQLBoolean,
GraphQLFloat,
@@ -42,10 +44,17 @@
return GraphQLList(list_of_type)
+ annotation_origin = getattr(annotation, "__origin__", None)
+
+ if annotation_origin == AsyncGenerator:
+ # async generators are used in subscription, we only need the yield type
+ # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator
+ return get_graphql_type_for_annotation(annotation.__args__[0], field_name)
+
# for some reason _name is None for Optional and Union types, so we check if we
# have __args__ populated, there might be some edge cases where __args__ is
# populated but the type is not an Union, like in the above case with Lists
- if hasattr(annotation, "__args__"):
+ elif hasattr(annotation, "__args__"):
types = annotation.__args__
non_none_types = [x for x in types if x != None.__class__] # noqa:E721
| {"golden_diff": "diff --git a/strawberry/contrib/starlette/app/graphql_ws_app.py b/strawberry/contrib/starlette/app/graphql_ws_app.py\n--- a/strawberry/contrib/starlette/app/graphql_ws_app.py\n+++ b/strawberry/contrib/starlette/app/graphql_ws_app.py\n@@ -60,8 +60,10 @@\n operation_name=payload[\"operationName\"],\n )\n \n- async for result in data.iterator:\n- await self._send_message(websocket, \"data\", {\"data\": result}, id_)\n+ async for result in data:\n+ # TODO: send errors if any\n+\n+ await self._send_message(websocket, \"data\", {\"data\": result.data}, id_)\n \n await self._send_message(websocket, \"complete\")\n await websocket.close()\ndiff --git a/strawberry/field.py b/strawberry/field.py\n--- a/strawberry/field.py\n+++ b/strawberry/field.py\n@@ -56,7 +56,11 @@\n return wrap(source, info, **args)\n \n if is_subscription:\n- kwargs = {\"subscribe\": resolver}\n+\n+ def _resolve(event, info):\n+ return event\n+\n+ kwargs = {\"subscribe\": resolver, \"resolve\": _resolve}\n else:\n kwargs = {\"resolve\": resolver}\n \ndiff --git a/strawberry/type_converter.py b/strawberry/type_converter.py\n--- a/strawberry/type_converter.py\n+++ b/strawberry/type_converter.py\n@@ -1,3 +1,5 @@\n+from collections.abc import AsyncGenerator\n+\n from graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n@@ -42,10 +44,17 @@\n \n return GraphQLList(list_of_type)\n \n+ annotation_origin = getattr(annotation, \"__origin__\", None)\n+\n+ if annotation_origin == AsyncGenerator:\n+ # async generators are used in subscription, we only need the yield type\n+ # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator\n+ return get_graphql_type_for_annotation(annotation.__args__[0], field_name)\n+\n # for some reason _name is None for Optional and Union types, so we check if we\n # have __args__ populated, there might be some edge cases where __args__ is\n # populated but the type is not an Union, like in the above case with Lists\n- if hasattr(annotation, \"__args__\"):\n+ elif hasattr(annotation, \"__args__\"):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n", "issue": "Subscriptions are returning data not wrapped into a field\nIt should return:\r\n\r\n```json\r\n{\r\n \"data\": {\r\n \"time\": 1\r\n }\r\n}\r\n```\r\n\r\n\r\n\r\nRepro:\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\n\r\nimport asyncio\r\nimport strawberry\r\n\r\[email protected]\r\nclass Query:\r\n hello: str = \"demo\"\r\n\r\[email protected]\r\nclass Subscription:\r\n @strawberry.subscription\r\n async def time(self, info) -> int:\r\n i = 0\r\n while True:\r\n yield i\r\n i += 1\r\n\r\n await asyncio.sleep(1)\r\n\r\n\r\nschema = strawberry.Schema(\r\n query=Query, subscription=Subscription\r\n)\r\n```\r\n\n", "before_files": [{"content": "from typing import get_type_hints\n\nfrom graphql import GraphQLField\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT\nfrom .exceptions import MissingArgumentsAnnotationsError, MissingReturnAnnotationError\nfrom .type_converter import get_graphql_type_for_annotation\nfrom .utils.dict_to_type import dict_to_type\nfrom .utils.inspect import get_func_args\n\n\ndef field(wrap, *, is_subscription=False):\n setattr(wrap, IS_STRAWBERRY_FIELD, True)\n annotations = get_type_hints(wrap)\n\n name = wrap.__name__\n\n if \"return\" not in annotations:\n raise MissingReturnAnnotationError(name)\n\n field_type = get_graphql_type_for_annotation(annotations[\"return\"], name)\n\n function_arguments = 
set(get_func_args(wrap)) - {\"self\", \"info\"}\n\n arguments_annotations = {\n key: value\n for key, value in annotations.items()\n if key not in [\"info\", \"return\"]\n }\n\n annotated_function_arguments = set(arguments_annotations.keys())\n arguments_missing_annotations = function_arguments - annotated_function_arguments\n\n if len(arguments_missing_annotations) > 0:\n raise MissingArgumentsAnnotationsError(name, arguments_missing_annotations)\n\n arguments = {\n name: get_graphql_type_for_annotation(annotation, name)\n for name, annotation in arguments_annotations.items()\n }\n\n def convert_args(args):\n converted_args = {}\n\n for key, value in args.items():\n if getattr(arguments_annotations[key], IS_STRAWBERRY_INPUT):\n converted_args[key] = dict_to_type(value, arguments_annotations[key])\n else:\n converted_args[key] = value\n\n return converted_args\n\n def resolver(source, info, **args):\n args = convert_args(args)\n\n return wrap(source, info, **args)\n\n if is_subscription:\n kwargs = {\"subscribe\": resolver}\n else:\n kwargs = {\"resolve\": resolver}\n\n wrap.field = GraphQLField(field_type, args=arguments, **kwargs)\n return wrap\n", "path": "strawberry/field.py"}, {"content": "from graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .scalars import ID\n\n\nREGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: this might lead to issues with types that have a field value\n is_optional = force_optional\n\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n return GraphQLList(list_of_type)\n\n # for some reason _name is None for Optional and Union types, so we check if we\n # have __args__ populated, there might be some edge cases where __args__ is\n # populated but the type is not an Union, like in the above case with Lists\n if hasattr(annotation, \"__args__\"):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_optional = None.__class__ in types\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n else:\n graphql_type = REGISTRY.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n", "path": "strawberry/type_converter.py"}, {"content": "import functools\nimport typing\n\n# from graphql.error import GraphQLError, format_error as format_graphql_error\nfrom graphql.language import parse\nfrom graphql.subscription import 
subscribe\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\nfrom starlette.websockets import WebSocket\n\nfrom .base import BaseApp\n\n\nclass GraphQLSubscriptionApp(BaseApp):\n def __call__(self, scope: Scope) -> ASGIInstance:\n return functools.partial(self.asgi, scope=scope)\n\n async def execute(self, query, variables=None, context=None, operation_name=None):\n return await subscribe(\n self.schema,\n parse(query),\n variable_values=variables,\n operation_name=operation_name,\n context_value=context,\n )\n\n async def _send_message(\n self,\n websocket: WebSocket,\n type_: str,\n payload: typing.Any = None,\n id_: str = None,\n ) -> None:\n data = {\"type\": type_}\n\n if id_ is not None:\n data[\"id\"] = id_\n\n if payload is not None:\n data[\"payload\"] = payload\n\n return await websocket.send_json(data)\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n assert scope[\"type\"] == \"websocket\"\n\n websocket = WebSocket(scope, receive=receive, send=send)\n await websocket.accept(subprotocol=\"graphql-ws\")\n await self._send_message(websocket, \"connection_ack\")\n\n # TODO: we should check that this is a proper connection init message\n await websocket.receive_json()\n data = await websocket.receive_json()\n\n id_ = data.get(\"id\", \"1\")\n payload = data.get(\"payload\", {})\n\n data = await self.execute(\n payload[\"query\"],\n payload[\"variables\"],\n operation_name=payload[\"operationName\"],\n )\n\n async for result in data.iterator:\n await self._send_message(websocket, \"data\", {\"data\": result}, id_)\n\n await self._send_message(websocket, \"complete\")\n await websocket.close()\n", "path": "strawberry/contrib/starlette/app/graphql_ws_app.py"}], "after_files": [{"content": "from typing import get_type_hints\n\nfrom graphql import GraphQLField\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT\nfrom .exceptions import MissingArgumentsAnnotationsError, MissingReturnAnnotationError\nfrom .type_converter import get_graphql_type_for_annotation\nfrom .utils.dict_to_type import dict_to_type\nfrom .utils.inspect import get_func_args\n\n\ndef field(wrap, *, is_subscription=False):\n setattr(wrap, IS_STRAWBERRY_FIELD, True)\n annotations = get_type_hints(wrap)\n\n name = wrap.__name__\n\n if \"return\" not in annotations:\n raise MissingReturnAnnotationError(name)\n\n field_type = get_graphql_type_for_annotation(annotations[\"return\"], name)\n\n function_arguments = set(get_func_args(wrap)) - {\"self\", \"info\"}\n\n arguments_annotations = {\n key: value\n for key, value in annotations.items()\n if key not in [\"info\", \"return\"]\n }\n\n annotated_function_arguments = set(arguments_annotations.keys())\n arguments_missing_annotations = function_arguments - annotated_function_arguments\n\n if len(arguments_missing_annotations) > 0:\n raise MissingArgumentsAnnotationsError(name, arguments_missing_annotations)\n\n arguments = {\n name: get_graphql_type_for_annotation(annotation, name)\n for name, annotation in arguments_annotations.items()\n }\n\n def convert_args(args):\n converted_args = {}\n\n for key, value in args.items():\n if getattr(arguments_annotations[key], IS_STRAWBERRY_INPUT):\n converted_args[key] = dict_to_type(value, arguments_annotations[key])\n else:\n converted_args[key] = value\n\n return converted_args\n\n def resolver(source, info, **args):\n args = convert_args(args)\n\n return wrap(source, info, **args)\n\n if is_subscription:\n\n def _resolve(event, info):\n return event\n\n kwargs = 
{\"subscribe\": resolver, \"resolve\": _resolve}\n else:\n kwargs = {\"resolve\": resolver}\n\n wrap.field = GraphQLField(field_type, args=arguments, **kwargs)\n return wrap\n", "path": "strawberry/field.py"}, {"content": "from collections.abc import AsyncGenerator\n\nfrom graphql import (\n GraphQLBoolean,\n GraphQLFloat,\n GraphQLID,\n GraphQLInt,\n GraphQLList,\n GraphQLNonNull,\n GraphQLString,\n GraphQLUnionType,\n)\n\nfrom .scalars import ID\n\n\nREGISTRY = {\n str: GraphQLString,\n int: GraphQLInt,\n float: GraphQLFloat,\n bool: GraphQLBoolean,\n ID: GraphQLID,\n}\n\n\n# TODO: make so that we don't pass force optional\n# we use that when trying to get the type for a\n# option field (which can either be a scalar or an object type)\ndef get_graphql_type_for_annotation(\n annotation, field_name: str, force_optional: bool = False\n):\n # TODO: this might lead to issues with types that have a field value\n is_optional = force_optional\n\n if hasattr(annotation, \"field\"):\n graphql_type = annotation.field\n else:\n annotation_name = getattr(annotation, \"_name\", None)\n\n if annotation_name == \"List\":\n list_of_type = get_graphql_type_for_annotation(\n annotation.__args__[0], field_name\n )\n\n return GraphQLList(list_of_type)\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n if annotation_origin == AsyncGenerator:\n # async generators are used in subscription, we only need the yield type\n # https://docs.python.org/3/library/typing.html#typing.AsyncGenerator\n return get_graphql_type_for_annotation(annotation.__args__[0], field_name)\n\n # for some reason _name is None for Optional and Union types, so we check if we\n # have __args__ populated, there might be some edge cases where __args__ is\n # populated but the type is not an Union, like in the above case with Lists\n elif hasattr(annotation, \"__args__\"):\n types = annotation.__args__\n non_none_types = [x for x in types if x != None.__class__] # noqa:E721\n\n # optionals are represented as Union[type, None]\n if len(non_none_types) == 1:\n is_optional = True\n graphql_type = get_graphql_type_for_annotation(\n non_none_types[0], field_name, force_optional=True\n )\n else:\n is_optional = None.__class__ in types\n\n # TODO: union types don't work with scalar types\n # so we want to return a nice error\n # also we want to make sure we have been passed\n # strawberry types\n graphql_type = GraphQLUnionType(\n field_name, [type.field for type in types]\n )\n else:\n graphql_type = REGISTRY.get(annotation)\n\n if not graphql_type:\n raise ValueError(f\"Unable to get GraphQL type for {annotation}\")\n\n if is_optional:\n return graphql_type\n\n return GraphQLNonNull(graphql_type)\n", "path": "strawberry/type_converter.py"}, {"content": "import functools\nimport typing\n\n# from graphql.error import GraphQLError, format_error as format_graphql_error\nfrom graphql.language import parse\nfrom graphql.subscription import subscribe\nfrom starlette.types import ASGIInstance, Receive, Scope, Send\nfrom starlette.websockets import WebSocket\n\nfrom .base import BaseApp\n\n\nclass GraphQLSubscriptionApp(BaseApp):\n def __call__(self, scope: Scope) -> ASGIInstance:\n return functools.partial(self.asgi, scope=scope)\n\n async def execute(self, query, variables=None, context=None, operation_name=None):\n return await subscribe(\n self.schema,\n parse(query),\n variable_values=variables,\n operation_name=operation_name,\n context_value=context,\n )\n\n async def _send_message(\n self,\n websocket: WebSocket,\n type_: str,\n 
payload: typing.Any = None,\n id_: str = None,\n ) -> None:\n data = {\"type\": type_}\n\n if id_ is not None:\n data[\"id\"] = id_\n\n if payload is not None:\n data[\"payload\"] = payload\n\n return await websocket.send_json(data)\n\n async def asgi(self, receive: Receive, send: Send, scope: Scope) -> None:\n assert scope[\"type\"] == \"websocket\"\n\n websocket = WebSocket(scope, receive=receive, send=send)\n await websocket.accept(subprotocol=\"graphql-ws\")\n await self._send_message(websocket, \"connection_ack\")\n\n # TODO: we should check that this is a proper connection init message\n await websocket.receive_json()\n data = await websocket.receive_json()\n\n id_ = data.get(\"id\", \"1\")\n payload = data.get(\"payload\", {})\n\n data = await self.execute(\n payload[\"query\"],\n payload[\"variables\"],\n operation_name=payload[\"operationName\"],\n )\n\n async for result in data:\n # TODO: send errors if any\n\n await self._send_message(websocket, \"data\", {\"data\": result.data}, id_)\n\n await self._send_message(websocket, \"complete\")\n await websocket.close()\n", "path": "strawberry/contrib/starlette/app/graphql_ws_app.py"}]} | 2,320 | 586 |
gh_patches_debug_19536 | rasdani/github-patches | git_diff | evennia__evennia-2873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG - Develop] REST API endpoints for specialized Objects (e.g. Characters) don't work
#### Describe the bug
The endpoints for `characters`, `rooms`, etc. can't find any objects at all. When accessing a known Character by ID, e.g. `/api/characters/335`, it returns `{'detail': 'not found'}`, and when checking the list view at `/api/characters/` it returns a list of length 0.
The `objects` endpoint can locate the same objects just fine, however, so they're accessible by the API - the endpoints themselves are what's broken.
I believe the method of filtering by typeclass is broken. e.g.
https://github.com/evennia/evennia/blob/3992f37dada8ae9d495b7bdc087ff107e21e29aa/evennia/web/api/views.py#L115-L117
#### To Reproduce
Steps to reproduce the behavior:
1. Create a game with some objects, rooms, and characters.
2. Turn on the API by adding `REST_API_ENABLED = True` to settings
3. Authenticate and access the endpoints `/api/objects/` `/api/characters/` and `/api/rooms/`
4. See error
#### Expected behavior
I expected the endpoints to return the info for the relevant object, and to correctly return all existing objects of the endpoint type.
#### Develop-branch commit
3992f37da
#### Additional context
The regular `objects` endpoint and all of the non-Object endpoints such as `/api/accounts` and `/api/scripts` work as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evennia/web/api/views.py`
Content:
```
1 """
2 Views are the functions that are called by different url endpoints. The Django
3 Rest Framework provides collections called 'ViewSets', which can generate a
4 number of views for the common CRUD operations.
5
6 """
7 from rest_framework.viewsets import ModelViewSet
8 from rest_framework.decorators import action
9 from rest_framework.response import Response
10 from rest_framework import status
11
12 from django_filters.rest_framework import DjangoFilterBackend
13
14 from evennia.objects.models import ObjectDB
15 from evennia.objects.objects import DefaultCharacter, DefaultExit, DefaultRoom
16 from evennia.accounts.models import AccountDB
17 from evennia.scripts.models import ScriptDB
18 from evennia.help.models import HelpEntry
19 from evennia.web.api import serializers
20 from evennia.web.api import filters
21 from evennia.web.api.permissions import EvenniaPermission
22
23
24 class GeneralViewSetMixin:
25 """
26 Mixin for both typeclass- and non-typeclass entities.
27
28 """
29
30 def get_serializer_class(self):
31 """
32 Allow different serializers for certain actions.
33
34 """
35 if self.action == "list":
36 if hasattr(self, "list_serializer_class"):
37 return self.list_serializer_class
38 return self.serializer_class
39
40
41 class TypeclassViewSetMixin(GeneralViewSetMixin):
42 """
43 This mixin adds some shared functionality to each viewset of a typeclass. They all use the same
44 permission classes and filter backend. You can override any of these in your own viewsets.
45
46 The `set_atribute` action is an example of a custom action added to a
47 viewset. Based on the name of the method, it will create a default url_name
48 (used for reversing) and url_path. The 'pk' argument is automatically
49 passed to this action because it has a url path of the format <object
50 type>/:pk/set-attribute. The get_object method is automatically set in the
51 expected viewset classes that will inherit this, using the pk that's passed
52 along to retrieve the object.
53
54 """
55
56 # permission classes determine who is authorized to call the view
57 permission_classes = [EvenniaPermission]
58 # the filter backend allows for retrieval views to have filter arguments passed to it,
59 # for example: mygame.com/api/objects?db_key=bob to find matches based on objects having a db_key of bob
60 filter_backends = [DjangoFilterBackend]
61
62 @action(detail=True, methods=["put", "post"])
63 def set_attribute(self, request, pk=None):
64 """
65 This action will set an attribute if the db_value is defined, or remove
66 it if no db_value is provided.
67
68 """
69 attr = serializers.AttributeSerializer(data=request.data)
70 obj = self.get_object()
71 if attr.is_valid(raise_exception=True):
72 key = attr.validated_data["db_key"]
73 value = attr.validated_data.get("db_value")
74 category = attr.validated_data.get("db_category")
75 attr_type = attr.validated_data.get("db_attrtype")
76 if attr_type == "nick":
77 handler = obj.nicks
78 else:
79 handler = obj.attributes
80 if value:
81 handler.add(key=key, value=value, category=category)
82 else:
83 handler.remove(key=key, category=category)
84 return Response(
85 serializers.AttributeSerializer(obj.db_attributes.all(), many=True).data,
86 status=status.HTTP_200_OK,
87 )
88 return Response(attr.errors, status=status.HTTP_400_BAD_REQUEST)
89
90
91 class ObjectDBViewSet(TypeclassViewSetMixin, ModelViewSet):
92 """
93 The Object is the parent for all in-game entities that have a location
94 (rooms, exits, characters etc).
95
96 """
97
98 # An example of a basic viewset for all ObjectDB instances. It declares the
99 # serializer to use for both retrieving and changing/creating/deleting
100 # instances. Serializers are similar to django forms, used for the
101 # transmitting of data (typically json).
102
103 serializer_class = serializers.ObjectDBSerializer
104 queryset = ObjectDB.objects.all()
105 filterset_class = filters.ObjectDBFilterSet
106 list_serializer_class = serializers.ObjectListSerializer
107
108
109 class CharacterViewSet(ObjectDBViewSet):
110 """
111 Characters are a type of Object commonly used as player avatars in-game.
112
113 """
114
115 queryset = DefaultCharacter.objects.typeclass_search(
116 DefaultCharacter.path, include_children=True
117 )
118 list_serializer_class = serializers.ObjectListSerializer
119
120
121 class RoomViewSet(ObjectDBViewSet):
122 """
123 Rooms indicate discrete locations in-game.
124
125 """
126
127 queryset = DefaultRoom.objects.typeclass_search(DefaultRoom.path, include_children=True)
128 list_serializer_class = serializers.ObjectListSerializer
129
130
131 class ExitViewSet(ObjectDBViewSet):
132 """
133 Exits are objects with a destination and allows for traversing from one
134 location to another.
135
136 """
137
138 queryset = DefaultExit.objects.typeclass_search(DefaultExit.path, include_children=True)
139 list_serializer_class = serializers.ObjectListSerializer
140
141
142 class AccountDBViewSet(TypeclassViewSetMixin, ModelViewSet):
143 """
144 Accounts represent the players connected to the game
145
146 """
147
148 serializer_class = serializers.AccountSerializer
149 queryset = AccountDB.objects.all()
150 filterset_class = filters.AccountDBFilterSet
151 list_serializer_class = serializers.AccountListSerializer
152
153
154 class ScriptDBViewSet(TypeclassViewSetMixin, ModelViewSet):
155 """
156 Scripts are meta-objects for storing system data, running timers etc. They
157 have no in-game existence.
158
159 """
160
161 serializer_class = serializers.ScriptDBSerializer
162 queryset = ScriptDB.objects.all()
163 filterset_class = filters.ScriptDBFilterSet
164 list_serializer_class = serializers.ScriptListSerializer
165
166
167 class HelpViewSet(GeneralViewSetMixin, ModelViewSet):
168 """
169 Database-stored help entries.
170 Note that command auto-help and file-based help entries are not accessible this way.
171
172 """
173
174 serializer_class = serializers.HelpSerializer
175 queryset = HelpEntry.objects.all()
176 filterset_class = filters.HelpFilterSet
177 list_serializer_class = serializers.HelpListSerializer
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evennia/web/api/views.py b/evennia/web/api/views.py
--- a/evennia/web/api/views.py
+++ b/evennia/web/api/views.py
@@ -112,10 +112,7 @@
"""
- queryset = DefaultCharacter.objects.typeclass_search(
- DefaultCharacter.path, include_children=True
- )
- list_serializer_class = serializers.ObjectListSerializer
+ queryset = DefaultCharacter.objects.all_family()
class RoomViewSet(ObjectDBViewSet):
@@ -124,8 +121,7 @@
"""
- queryset = DefaultRoom.objects.typeclass_search(DefaultRoom.path, include_children=True)
- list_serializer_class = serializers.ObjectListSerializer
+ queryset = DefaultRoom.objects.all_family()
class ExitViewSet(ObjectDBViewSet):
@@ -135,8 +131,7 @@
"""
- queryset = DefaultExit.objects.typeclass_search(DefaultExit.path, include_children=True)
- list_serializer_class = serializers.ObjectListSerializer
+ queryset = DefaultExit.objects.all_family()
class AccountDBViewSet(TypeclassViewSetMixin, ModelViewSet):
| {"golden_diff": "diff --git a/evennia/web/api/views.py b/evennia/web/api/views.py\n--- a/evennia/web/api/views.py\n+++ b/evennia/web/api/views.py\n@@ -112,10 +112,7 @@\n \n \"\"\"\n \n- queryset = DefaultCharacter.objects.typeclass_search(\n- DefaultCharacter.path, include_children=True\n- )\n- list_serializer_class = serializers.ObjectListSerializer\n+ queryset = DefaultCharacter.objects.all_family()\n \n \n class RoomViewSet(ObjectDBViewSet):\n@@ -124,8 +121,7 @@\n \n \"\"\"\n \n- queryset = DefaultRoom.objects.typeclass_search(DefaultRoom.path, include_children=True)\n- list_serializer_class = serializers.ObjectListSerializer\n+ queryset = DefaultRoom.objects.all_family()\n \n \n class ExitViewSet(ObjectDBViewSet):\n@@ -135,8 +131,7 @@\n \n \"\"\"\n \n- queryset = DefaultExit.objects.typeclass_search(DefaultExit.path, include_children=True)\n- list_serializer_class = serializers.ObjectListSerializer\n+ queryset = DefaultExit.objects.all_family()\n \n \n class AccountDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n", "issue": "[BUG - Develop] REST API endpoints for specialized Objects (e.g. Characters) don't work\n#### Describe the bug\r\nThe endpoints for `characters`, `rooms` etc can't find any objects at all. When accessing a known Character by ID, e.g. `/api/characters/335` it returns `{'detail': 'not found'}` and when checking the list view at `/api/characters/` it returns a list of length 0.\r\n\r\nThe `objects` endpoint can locate the same objects just fine, however, so they're accessible by the API - the endpoints themselves are what's broken.\r\n\r\nI believe the method of filtering by typeclass is broken. e.g.\r\nhttps://github.com/evennia/evennia/blob/3992f37dada8ae9d495b7bdc087ff107e21e29aa/evennia/web/api/views.py#L115-L117\r\n\r\n#### To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Create a game with some objects, rooms, and characters.\r\n2. Turn on the API by adding `REST_API_ENABLED = True` to settings\r\n3. Authenticate and access the endpoints `/api/objects/` `/api/characters/` and `/api/rooms/`\r\n4. See error\r\n\r\n#### Expected behavior\r\nI expected the endpoints to return the info for the relevant object, and to correctly return all existing objects of the endpoint type.\r\n\r\n#### Develop-branch commit\r\n3992f37da\r\n\r\n#### Additional context\r\nThe regular `objects` endpoint and all of the non-Object endpoints such as `/api/accounts` and `/api/scripts` work as expected.\n", "before_files": [{"content": "\"\"\"\nViews are the functions that are called by different url endpoints. 
The Django\nRest Framework provides collections called 'ViewSets', which can generate a\nnumber of views for the common CRUD operations.\n\n\"\"\"\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom evennia.objects.models import ObjectDB\nfrom evennia.objects.objects import DefaultCharacter, DefaultExit, DefaultRoom\nfrom evennia.accounts.models import AccountDB\nfrom evennia.scripts.models import ScriptDB\nfrom evennia.help.models import HelpEntry\nfrom evennia.web.api import serializers\nfrom evennia.web.api import filters\nfrom evennia.web.api.permissions import EvenniaPermission\n\n\nclass GeneralViewSetMixin:\n \"\"\"\n Mixin for both typeclass- and non-typeclass entities.\n\n \"\"\"\n\n def get_serializer_class(self):\n \"\"\"\n Allow different serializers for certain actions.\n\n \"\"\"\n if self.action == \"list\":\n if hasattr(self, \"list_serializer_class\"):\n return self.list_serializer_class\n return self.serializer_class\n\n\nclass TypeclassViewSetMixin(GeneralViewSetMixin):\n \"\"\"\n This mixin adds some shared functionality to each viewset of a typeclass. They all use the same\n permission classes and filter backend. You can override any of these in your own viewsets.\n\n The `set_atribute` action is an example of a custom action added to a\n viewset. Based on the name of the method, it will create a default url_name\n (used for reversing) and url_path. The 'pk' argument is automatically\n passed to this action because it has a url path of the format <object\n type>/:pk/set-attribute. The get_object method is automatically set in the\n expected viewset classes that will inherit this, using the pk that's passed\n along to retrieve the object.\n\n \"\"\"\n\n # permission classes determine who is authorized to call the view\n permission_classes = [EvenniaPermission]\n # the filter backend allows for retrieval views to have filter arguments passed to it,\n # for example: mygame.com/api/objects?db_key=bob to find matches based on objects having a db_key of bob\n filter_backends = [DjangoFilterBackend]\n\n @action(detail=True, methods=[\"put\", \"post\"])\n def set_attribute(self, request, pk=None):\n \"\"\"\n This action will set an attribute if the db_value is defined, or remove\n it if no db_value is provided.\n\n \"\"\"\n attr = serializers.AttributeSerializer(data=request.data)\n obj = self.get_object()\n if attr.is_valid(raise_exception=True):\n key = attr.validated_data[\"db_key\"]\n value = attr.validated_data.get(\"db_value\")\n category = attr.validated_data.get(\"db_category\")\n attr_type = attr.validated_data.get(\"db_attrtype\")\n if attr_type == \"nick\":\n handler = obj.nicks\n else:\n handler = obj.attributes\n if value:\n handler.add(key=key, value=value, category=category)\n else:\n handler.remove(key=key, category=category)\n return Response(\n serializers.AttributeSerializer(obj.db_attributes.all(), many=True).data,\n status=status.HTTP_200_OK,\n )\n return Response(attr.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ObjectDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n The Object is the parent for all in-game entities that have a location\n (rooms, exits, characters etc).\n\n \"\"\"\n\n # An example of a basic viewset for all ObjectDB instances. 
It declares the\n # serializer to use for both retrieving and changing/creating/deleting\n # instances. Serializers are similar to django forms, used for the\n # transmitting of data (typically json).\n\n serializer_class = serializers.ObjectDBSerializer\n queryset = ObjectDB.objects.all()\n filterset_class = filters.ObjectDBFilterSet\n list_serializer_class = serializers.ObjectListSerializer\n\n\nclass CharacterViewSet(ObjectDBViewSet):\n \"\"\"\n Characters are a type of Object commonly used as player avatars in-game.\n\n \"\"\"\n\n queryset = DefaultCharacter.objects.typeclass_search(\n DefaultCharacter.path, include_children=True\n )\n list_serializer_class = serializers.ObjectListSerializer\n\n\nclass RoomViewSet(ObjectDBViewSet):\n \"\"\"\n Rooms indicate discrete locations in-game.\n\n \"\"\"\n\n queryset = DefaultRoom.objects.typeclass_search(DefaultRoom.path, include_children=True)\n list_serializer_class = serializers.ObjectListSerializer\n\n\nclass ExitViewSet(ObjectDBViewSet):\n \"\"\"\n Exits are objects with a destination and allows for traversing from one\n location to another.\n\n \"\"\"\n\n queryset = DefaultExit.objects.typeclass_search(DefaultExit.path, include_children=True)\n list_serializer_class = serializers.ObjectListSerializer\n\n\nclass AccountDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n Accounts represent the players connected to the game\n\n \"\"\"\n\n serializer_class = serializers.AccountSerializer\n queryset = AccountDB.objects.all()\n filterset_class = filters.AccountDBFilterSet\n list_serializer_class = serializers.AccountListSerializer\n\n\nclass ScriptDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n Scripts are meta-objects for storing system data, running timers etc. They\n have no in-game existence.\n\n \"\"\"\n\n serializer_class = serializers.ScriptDBSerializer\n queryset = ScriptDB.objects.all()\n filterset_class = filters.ScriptDBFilterSet\n list_serializer_class = serializers.ScriptListSerializer\n\n\nclass HelpViewSet(GeneralViewSetMixin, ModelViewSet):\n \"\"\"\n Database-stored help entries.\n Note that command auto-help and file-based help entries are not accessible this way.\n\n \"\"\"\n\n serializer_class = serializers.HelpSerializer\n queryset = HelpEntry.objects.all()\n filterset_class = filters.HelpFilterSet\n list_serializer_class = serializers.HelpListSerializer\n", "path": "evennia/web/api/views.py"}], "after_files": [{"content": "\"\"\"\nViews are the functions that are called by different url endpoints. 
The Django\nRest Framework provides collections called 'ViewSets', which can generate a\nnumber of views for the common CRUD operations.\n\n\"\"\"\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom evennia.objects.models import ObjectDB\nfrom evennia.objects.objects import DefaultCharacter, DefaultExit, DefaultRoom\nfrom evennia.accounts.models import AccountDB\nfrom evennia.scripts.models import ScriptDB\nfrom evennia.help.models import HelpEntry\nfrom evennia.web.api import serializers\nfrom evennia.web.api import filters\nfrom evennia.web.api.permissions import EvenniaPermission\n\n\nclass GeneralViewSetMixin:\n \"\"\"\n Mixin for both typeclass- and non-typeclass entities.\n\n \"\"\"\n\n def get_serializer_class(self):\n \"\"\"\n Allow different serializers for certain actions.\n\n \"\"\"\n if self.action == \"list\":\n if hasattr(self, \"list_serializer_class\"):\n return self.list_serializer_class\n return self.serializer_class\n\n\nclass TypeclassViewSetMixin(GeneralViewSetMixin):\n \"\"\"\n This mixin adds some shared functionality to each viewset of a typeclass. They all use the same\n permission classes and filter backend. You can override any of these in your own viewsets.\n\n The `set_atribute` action is an example of a custom action added to a\n viewset. Based on the name of the method, it will create a default url_name\n (used for reversing) and url_path. The 'pk' argument is automatically\n passed to this action because it has a url path of the format <object\n type>/:pk/set-attribute. The get_object method is automatically set in the\n expected viewset classes that will inherit this, using the pk that's passed\n along to retrieve the object.\n\n \"\"\"\n\n # permission classes determine who is authorized to call the view\n permission_classes = [EvenniaPermission]\n # the filter backend allows for retrieval views to have filter arguments passed to it,\n # for example: mygame.com/api/objects?db_key=bob to find matches based on objects having a db_key of bob\n filter_backends = [DjangoFilterBackend]\n\n @action(detail=True, methods=[\"put\", \"post\"])\n def set_attribute(self, request, pk=None):\n \"\"\"\n This action will set an attribute if the db_value is defined, or remove\n it if no db_value is provided.\n\n \"\"\"\n attr = serializers.AttributeSerializer(data=request.data)\n obj = self.get_object()\n if attr.is_valid(raise_exception=True):\n key = attr.validated_data[\"db_key\"]\n value = attr.validated_data.get(\"db_value\")\n category = attr.validated_data.get(\"db_category\")\n attr_type = attr.validated_data.get(\"db_attrtype\")\n if attr_type == \"nick\":\n handler = obj.nicks\n else:\n handler = obj.attributes\n if value:\n handler.add(key=key, value=value, category=category)\n else:\n handler.remove(key=key, category=category)\n return Response(\n serializers.AttributeSerializer(obj.db_attributes.all(), many=True).data,\n status=status.HTTP_200_OK,\n )\n return Response(attr.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ObjectDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n The Object is the parent for all in-game entities that have a location\n (rooms, exits, characters etc).\n\n \"\"\"\n\n # An example of a basic viewset for all ObjectDB instances. 
It declares the\n # serializer to use for both retrieving and changing/creating/deleting\n # instances. Serializers are similar to django forms, used for the\n # transmitting of data (typically json).\n\n serializer_class = serializers.ObjectDBSerializer\n queryset = ObjectDB.objects.all()\n filterset_class = filters.ObjectDBFilterSet\n list_serializer_class = serializers.ObjectListSerializer\n\n\nclass CharacterViewSet(ObjectDBViewSet):\n \"\"\"\n Characters are a type of Object commonly used as player avatars in-game.\n\n \"\"\"\n\n queryset = DefaultCharacter.objects.all_family()\n\n\nclass RoomViewSet(ObjectDBViewSet):\n \"\"\"\n Rooms indicate discrete locations in-game.\n\n \"\"\"\n\n queryset = DefaultRoom.objects.all_family()\n\n\nclass ExitViewSet(ObjectDBViewSet):\n \"\"\"\n Exits are objects with a destination and allows for traversing from one\n location to another.\n\n \"\"\"\n\n queryset = DefaultExit.objects.all_family()\n\n\nclass AccountDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n Accounts represent the players connected to the game\n\n \"\"\"\n\n serializer_class = serializers.AccountSerializer\n queryset = AccountDB.objects.all()\n filterset_class = filters.AccountDBFilterSet\n list_serializer_class = serializers.AccountListSerializer\n\n\nclass ScriptDBViewSet(TypeclassViewSetMixin, ModelViewSet):\n \"\"\"\n Scripts are meta-objects for storing system data, running timers etc. They\n have no in-game existence.\n\n \"\"\"\n\n serializer_class = serializers.ScriptDBSerializer\n queryset = ScriptDB.objects.all()\n filterset_class = filters.ScriptDBFilterSet\n list_serializer_class = serializers.ScriptListSerializer\n\n\nclass HelpViewSet(GeneralViewSetMixin, ModelViewSet):\n \"\"\"\n Database-stored help entries.\n Note that command auto-help and file-based help entries are not accessible this way.\n\n \"\"\"\n\n serializer_class = serializers.HelpSerializer\n queryset = HelpEntry.objects.all()\n filterset_class = filters.HelpFilterSet\n list_serializer_class = serializers.HelpListSerializer\n", "path": "evennia/web/api/views.py"}]} | 2,319 | 247 |
gh_patches_debug_26354 | rasdani/github-patches | git_diff | getsentry__sentry-51016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add check to integrations for rule snooze
We want to snooze integration notifications if they are muted for everyone since for integrations "mute for me" doesn't make much sense. "mute for me" won't be offered to the users as an option on the front end, but we should still check that the alert is being muted for everyone in these integration checks. Tests are very important here too to confirm that we are correctly muting the integration notifications.
Similar PR from V1 of Mute Alerts: https://github.com/getsentry/sentry/pull/49375
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/incidents/action_handlers.py`
Content:
```
1 from __future__ import annotations
2
3 import abc
4 import logging
5 from typing import Sequence, Set, Tuple
6
7 from django.conf import settings
8 from django.template.defaultfilters import pluralize
9 from django.urls import reverse
10
11 from sentry import features
12 from sentry.charts.types import ChartSize
13 from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS
14 from sentry.incidents.charts import build_metric_alert_chart
15 from sentry.incidents.models import (
16 INCIDENT_STATUS,
17 AlertRuleThresholdType,
18 AlertRuleTriggerAction,
19 IncidentStatus,
20 TriggerStatus,
21 )
22 from sentry.models.notificationsetting import NotificationSetting
23 from sentry.models.options.user_option import UserOption
24 from sentry.models.user import User
25 from sentry.services.hybrid_cloud.user import RpcUser
26 from sentry.types.integrations import ExternalProviders
27 from sentry.utils import json
28 from sentry.utils.email import MessageBuilder, get_email_addresses
29
30
31 class ActionHandler(metaclass=abc.ABCMeta):
32 status_display = {TriggerStatus.ACTIVE: "Fired", TriggerStatus.RESOLVED: "Resolved"}
33
34 def __init__(self, action, incident, project):
35 self.action = action
36 self.incident = incident
37 self.project = project
38
39 @abc.abstractmethod
40 def fire(self, metric_value: int | float, new_status: IncidentStatus):
41 pass
42
43 @abc.abstractmethod
44 def resolve(self, metric_value: int | float, new_status: IncidentStatus):
45 pass
46
47
48 class DefaultActionHandler(ActionHandler):
49 def fire(self, metric_value: int | float, new_status: IncidentStatus):
50 self.send_alert(metric_value, new_status)
51
52 def resolve(self, metric_value: int | float, new_status: IncidentStatus):
53 self.send_alert(metric_value, new_status)
54
55 @abc.abstractmethod
56 def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
57 pass
58
59
60 @AlertRuleTriggerAction.register_type(
61 "email",
62 AlertRuleTriggerAction.Type.EMAIL,
63 [AlertRuleTriggerAction.TargetType.USER, AlertRuleTriggerAction.TargetType.TEAM],
64 )
65 class EmailActionHandler(ActionHandler):
66 def _get_targets(self) -> Set[int]:
67 target = self.action.target
68 if not target:
69 return set()
70
71 if self.action.target_type == AlertRuleTriggerAction.TargetType.USER.value:
72 return {target.id}
73
74 elif self.action.target_type == AlertRuleTriggerAction.TargetType.TEAM.value:
75 users = NotificationSetting.objects.filter_to_accepting_recipients(
76 self.project,
77 {RpcUser(id=member.user_id) for member in target.member_set},
78 )[ExternalProviders.EMAIL]
79 return {user.id for user in users}
80
81 return set()
82
83 def get_targets(self) -> Sequence[Tuple[int, str]]:
84 return list(get_email_addresses(self._get_targets(), project=self.project).items())
85
86 def fire(self, metric_value: int | float, new_status: IncidentStatus):
87 self.email_users(TriggerStatus.ACTIVE, new_status)
88
89 def resolve(self, metric_value: int | float, new_status: IncidentStatus):
90 self.email_users(TriggerStatus.RESOLVED, new_status)
91
92 def email_users(self, trigger_status: TriggerStatus, incident_status: IncidentStatus) -> None:
93 for user_id, email in self.get_targets():
94 user = User.objects.get_from_cache(id=user_id)
95 email_context = generate_incident_trigger_email_context(
96 self.project,
97 self.incident,
98 self.action.alert_rule_trigger,
99 trigger_status,
100 incident_status,
101 user,
102 )
103 self.build_message(email_context, trigger_status, user_id).send_async(to=[email])
104
105 def build_message(self, context, status, user_id):
106 display = self.status_display[status]
107 return MessageBuilder(
108 subject="[{}] {} - {}".format(
109 context["status"], context["incident_name"], self.project.slug
110 ),
111 template="sentry/emails/incidents/trigger.txt",
112 html_template="sentry/emails/incidents/trigger.html",
113 type=f"incident.alert_rule_{display.lower()}",
114 context=context,
115 headers={"X-SMTPAPI": json.dumps({"category": "metric_alert_email"})},
116 )
117
118
119 @AlertRuleTriggerAction.register_type(
120 "slack",
121 AlertRuleTriggerAction.Type.SLACK,
122 [AlertRuleTriggerAction.TargetType.SPECIFIC],
123 integration_provider="slack",
124 )
125 class SlackActionHandler(DefaultActionHandler):
126 def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
127 from sentry.integrations.slack.utils import send_incident_alert_notification
128
129 send_incident_alert_notification(self.action, self.incident, metric_value, new_status)
130
131
132 @AlertRuleTriggerAction.register_type(
133 "msteams",
134 AlertRuleTriggerAction.Type.MSTEAMS,
135 [AlertRuleTriggerAction.TargetType.SPECIFIC],
136 integration_provider="msteams",
137 )
138 class MsTeamsActionHandler(DefaultActionHandler):
139 def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
140 from sentry.integrations.msteams.utils import send_incident_alert_notification
141
142 send_incident_alert_notification(self.action, self.incident, metric_value, new_status)
143
144
145 @AlertRuleTriggerAction.register_type(
146 "pagerduty",
147 AlertRuleTriggerAction.Type.PAGERDUTY,
148 [AlertRuleTriggerAction.TargetType.SPECIFIC],
149 integration_provider="pagerduty",
150 )
151 class PagerDutyActionHandler(DefaultActionHandler):
152 def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
153 from sentry.integrations.pagerduty.utils import send_incident_alert_notification
154
155 send_incident_alert_notification(self.action, self.incident, metric_value, new_status)
156
157
158 @AlertRuleTriggerAction.register_type(
159 "sentry_app",
160 AlertRuleTriggerAction.Type.SENTRY_APP,
161 [AlertRuleTriggerAction.TargetType.SENTRY_APP],
162 )
163 class SentryAppActionHandler(DefaultActionHandler):
164 def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
165 from sentry.rules.actions.notify_event_service import send_incident_alert_notification
166
167 send_incident_alert_notification(self.action, self.incident, new_status, metric_value)
168
169
170 def format_duration(minutes):
171 """
172 Format minutes into a duration string
173 """
174
175 if minutes >= 1440:
176 days = int(minutes // 1440)
177 return f"{days:d} day{pluralize(days)}"
178
179 if minutes >= 60:
180 hours = int(minutes // 60)
181 return f"{hours:d} hour{pluralize(hours)}"
182
183 if minutes >= 1:
184 minutes = int(minutes)
185 return f"{minutes:d} minute{pluralize(minutes)}"
186
187 seconds = int(minutes // 60)
188 return f"{seconds:d} second{pluralize(seconds)}"
189
190
191 def generate_incident_trigger_email_context(
192 project,
193 incident,
194 alert_rule_trigger,
195 trigger_status,
196 incident_status,
197 user=None,
198 ):
199 trigger = alert_rule_trigger
200 alert_rule = trigger.alert_rule
201 snuba_query = alert_rule.snuba_query
202 is_active = trigger_status == TriggerStatus.ACTIVE
203 is_threshold_type_above = alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value
204
205 # if alert threshold and threshold type is above then show '>'
206 # if resolve threshold and threshold type is *BELOW* then show '>'
207 # we can simplify this to be the below statement
208 show_greater_than_string = is_active == is_threshold_type_above
209 environment_string = snuba_query.environment.name if snuba_query.environment else "All"
210
211 aggregate = alert_rule.snuba_query.aggregate
212 if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:
213 aggregate = aggregate.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()
214
215 threshold = trigger.alert_threshold if is_active else alert_rule.resolve_threshold
216 if threshold is None:
217 # Setting this to trigger threshold because in the case of a resolve if no resolve
218 # threshold is specified this will be None. Since we add a comparison sign to the
219 # string it makes sense to set this to the trigger alert threshold if no threshold is
220 # specified
221 threshold = trigger.alert_threshold
222
223 chart_url = None
224 if features.has("organizations:metric-alert-chartcuterie", incident.organization):
225 try:
226 chart_url = build_metric_alert_chart(
227 organization=incident.organization,
228 alert_rule=incident.alert_rule,
229 selected_incident=incident,
230 size=ChartSize({"width": 600, "height": 200}),
231 )
232 except Exception:
233 logging.exception("Error while attempting to build_metric_alert_chart")
234
235 tz = settings.SENTRY_DEFAULT_TIME_ZONE
236 if user is not None:
237 user_option_tz = UserOption.objects.get_value(user=user, key="timezone")
238 if user_option_tz is not None:
239 tz = user_option_tz
240
241 organization = incident.organization
242 return {
243 "link": organization.absolute_url(
244 reverse(
245 "sentry-metric-alert",
246 kwargs={
247 "organization_slug": organization.slug,
248 "incident_id": incident.identifier,
249 },
250 ),
251 query="referrer=alert_email",
252 ),
253 "rule_link": organization.absolute_url(
254 reverse(
255 "sentry-alert-rule",
256 kwargs={
257 "organization_slug": organization.slug,
258 "project_slug": project.slug,
259 "alert_rule_id": trigger.alert_rule_id,
260 },
261 )
262 ),
263 "project_slug": project.slug,
264 "incident_name": incident.title,
265 "environment": environment_string,
266 "time_window": format_duration(snuba_query.time_window / 60),
267 "triggered_at": incident.date_added,
268 "aggregate": aggregate,
269 "query": snuba_query.query,
270 "threshold": threshold,
271 # if alert threshold and threshold type is above then show '>'
272 # if resolve threshold and threshold type is *BELOW* then show '>'
273 "threshold_direction_string": ">" if show_greater_than_string else "<",
274 "status": INCIDENT_STATUS[incident_status],
275 "status_key": INCIDENT_STATUS[incident_status].lower(),
276 "is_critical": incident_status == IncidentStatus.CRITICAL,
277 "is_warning": incident_status == IncidentStatus.WARNING,
278 "unsubscribe_link": None,
279 "chart_url": chart_url,
280 "timezone": tz,
281 }
282
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/incidents/action_handlers.py b/src/sentry/incidents/action_handlers.py
--- a/src/sentry/incidents/action_handlers.py
+++ b/src/sentry/incidents/action_handlers.py
@@ -21,6 +21,7 @@
)
from sentry.models.notificationsetting import NotificationSetting
from sentry.models.options.user_option import UserOption
+from sentry.models.rulesnooze import RuleSnooze
from sentry.models.user import User
from sentry.services.hybrid_cloud.user import RpcUser
from sentry.types.integrations import ExternalProviders
@@ -47,10 +48,16 @@
class DefaultActionHandler(ActionHandler):
def fire(self, metric_value: int | float, new_status: IncidentStatus):
- self.send_alert(metric_value, new_status)
+ if not RuleSnooze.objects.filter(
+ alert_rule=self.incident.alert_rule, user_id__isnull=True
+ ).exists():
+ self.send_alert(metric_value, new_status)
def resolve(self, metric_value: int | float, new_status: IncidentStatus):
- self.send_alert(metric_value, new_status)
+ if not RuleSnooze.objects.filter(
+ alert_rule=self.incident.alert_rule, user_id__isnull=True
+ ).exists():
+ self.send_alert(metric_value, new_status)
@abc.abstractmethod
def send_alert(self, metric_value: int | float, new_status: IncidentStatus):
| {"golden_diff": "diff --git a/src/sentry/incidents/action_handlers.py b/src/sentry/incidents/action_handlers.py\n--- a/src/sentry/incidents/action_handlers.py\n+++ b/src/sentry/incidents/action_handlers.py\n@@ -21,6 +21,7 @@\n )\n from sentry.models.notificationsetting import NotificationSetting\n from sentry.models.options.user_option import UserOption\n+from sentry.models.rulesnooze import RuleSnooze\n from sentry.models.user import User\n from sentry.services.hybrid_cloud.user import RpcUser\n from sentry.types.integrations import ExternalProviders\n@@ -47,10 +48,16 @@\n \n class DefaultActionHandler(ActionHandler):\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n- self.send_alert(metric_value, new_status)\n+ if not RuleSnooze.objects.filter(\n+ alert_rule=self.incident.alert_rule, user_id__isnull=True\n+ ).exists():\n+ self.send_alert(metric_value, new_status)\n \n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n- self.send_alert(metric_value, new_status)\n+ if not RuleSnooze.objects.filter(\n+ alert_rule=self.incident.alert_rule, user_id__isnull=True\n+ ).exists():\n+ self.send_alert(metric_value, new_status)\n \n @abc.abstractmethod\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n", "issue": "Add check to integrations for rule snooze\nWe want to snooze integration notifications if they are muted for everyone since for integrations \"mute for me\" doesn't make much sense. \"mute for me\" won't be offered to the users as an option on the front end, but we should still check that the alert is being muted for everyone in these integration checks. Tests are very important here too to confirm that we are correctly muting the integration notifications. \r\n\r\nSimilar PR from V1 of Mute Alerts: https://github.com/getsentry/sentry/pull/49375\n", "before_files": [{"content": "from __future__ import annotations\n\nimport abc\nimport logging\nfrom typing import Sequence, Set, Tuple\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import pluralize\nfrom django.urls import reverse\n\nfrom sentry import features\nfrom sentry.charts.types import ChartSize\nfrom sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS\nfrom sentry.incidents.charts import build_metric_alert_chart\nfrom sentry.incidents.models import (\n INCIDENT_STATUS,\n AlertRuleThresholdType,\n AlertRuleTriggerAction,\n IncidentStatus,\n TriggerStatus,\n)\nfrom sentry.models.notificationsetting import NotificationSetting\nfrom sentry.models.options.user_option import UserOption\nfrom sentry.models.user import User\nfrom sentry.services.hybrid_cloud.user import RpcUser\nfrom sentry.types.integrations import ExternalProviders\nfrom sentry.utils import json\nfrom sentry.utils.email import MessageBuilder, get_email_addresses\n\n\nclass ActionHandler(metaclass=abc.ABCMeta):\n status_display = {TriggerStatus.ACTIVE: \"Fired\", TriggerStatus.RESOLVED: \"Resolved\"}\n\n def __init__(self, action, incident, project):\n self.action = action\n self.incident = incident\n self.project = project\n\n @abc.abstractmethod\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n pass\n\n @abc.abstractmethod\n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n pass\n\n\nclass DefaultActionHandler(ActionHandler):\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n self.send_alert(metric_value, new_status)\n\n def resolve(self, metric_value: int | float, new_status: 
IncidentStatus):\n self.send_alert(metric_value, new_status)\n\n @abc.abstractmethod\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n pass\n\n\[email protected]_type(\n \"email\",\n AlertRuleTriggerAction.Type.EMAIL,\n [AlertRuleTriggerAction.TargetType.USER, AlertRuleTriggerAction.TargetType.TEAM],\n)\nclass EmailActionHandler(ActionHandler):\n def _get_targets(self) -> Set[int]:\n target = self.action.target\n if not target:\n return set()\n\n if self.action.target_type == AlertRuleTriggerAction.TargetType.USER.value:\n return {target.id}\n\n elif self.action.target_type == AlertRuleTriggerAction.TargetType.TEAM.value:\n users = NotificationSetting.objects.filter_to_accepting_recipients(\n self.project,\n {RpcUser(id=member.user_id) for member in target.member_set},\n )[ExternalProviders.EMAIL]\n return {user.id for user in users}\n\n return set()\n\n def get_targets(self) -> Sequence[Tuple[int, str]]:\n return list(get_email_addresses(self._get_targets(), project=self.project).items())\n\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n self.email_users(TriggerStatus.ACTIVE, new_status)\n\n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n self.email_users(TriggerStatus.RESOLVED, new_status)\n\n def email_users(self, trigger_status: TriggerStatus, incident_status: IncidentStatus) -> None:\n for user_id, email in self.get_targets():\n user = User.objects.get_from_cache(id=user_id)\n email_context = generate_incident_trigger_email_context(\n self.project,\n self.incident,\n self.action.alert_rule_trigger,\n trigger_status,\n incident_status,\n user,\n )\n self.build_message(email_context, trigger_status, user_id).send_async(to=[email])\n\n def build_message(self, context, status, user_id):\n display = self.status_display[status]\n return MessageBuilder(\n subject=\"[{}] {} - {}\".format(\n context[\"status\"], context[\"incident_name\"], self.project.slug\n ),\n template=\"sentry/emails/incidents/trigger.txt\",\n html_template=\"sentry/emails/incidents/trigger.html\",\n type=f\"incident.alert_rule_{display.lower()}\",\n context=context,\n headers={\"X-SMTPAPI\": json.dumps({\"category\": \"metric_alert_email\"})},\n )\n\n\[email protected]_type(\n \"slack\",\n AlertRuleTriggerAction.Type.SLACK,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"slack\",\n)\nclass SlackActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.integrations.slack.utils import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"msteams\",\n AlertRuleTriggerAction.Type.MSTEAMS,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"msteams\",\n)\nclass MsTeamsActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.integrations.msteams.utils import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"pagerduty\",\n AlertRuleTriggerAction.Type.PAGERDUTY,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"pagerduty\",\n)\nclass PagerDutyActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.integrations.pagerduty.utils import 
send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"sentry_app\",\n AlertRuleTriggerAction.Type.SENTRY_APP,\n [AlertRuleTriggerAction.TargetType.SENTRY_APP],\n)\nclass SentryAppActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.rules.actions.notify_event_service import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, new_status, metric_value)\n\n\ndef format_duration(minutes):\n \"\"\"\n Format minutes into a duration string\n \"\"\"\n\n if minutes >= 1440:\n days = int(minutes // 1440)\n return f\"{days:d} day{pluralize(days)}\"\n\n if minutes >= 60:\n hours = int(minutes // 60)\n return f\"{hours:d} hour{pluralize(hours)}\"\n\n if minutes >= 1:\n minutes = int(minutes)\n return f\"{minutes:d} minute{pluralize(minutes)}\"\n\n seconds = int(minutes // 60)\n return f\"{seconds:d} second{pluralize(seconds)}\"\n\n\ndef generate_incident_trigger_email_context(\n project,\n incident,\n alert_rule_trigger,\n trigger_status,\n incident_status,\n user=None,\n):\n trigger = alert_rule_trigger\n alert_rule = trigger.alert_rule\n snuba_query = alert_rule.snuba_query\n is_active = trigger_status == TriggerStatus.ACTIVE\n is_threshold_type_above = alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value\n\n # if alert threshold and threshold type is above then show '>'\n # if resolve threshold and threshold type is *BELOW* then show '>'\n # we can simplify this to be the below statement\n show_greater_than_string = is_active == is_threshold_type_above\n environment_string = snuba_query.environment.name if snuba_query.environment else \"All\"\n\n aggregate = alert_rule.snuba_query.aggregate\n if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:\n aggregate = aggregate.split(f\"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}\")[0].strip()\n\n threshold = trigger.alert_threshold if is_active else alert_rule.resolve_threshold\n if threshold is None:\n # Setting this to trigger threshold because in the case of a resolve if no resolve\n # threshold is specified this will be None. 
Since we add a comparison sign to the\n # string it makes sense to set this to the trigger alert threshold if no threshold is\n # specified\n threshold = trigger.alert_threshold\n\n chart_url = None\n if features.has(\"organizations:metric-alert-chartcuterie\", incident.organization):\n try:\n chart_url = build_metric_alert_chart(\n organization=incident.organization,\n alert_rule=incident.alert_rule,\n selected_incident=incident,\n size=ChartSize({\"width\": 600, \"height\": 200}),\n )\n except Exception:\n logging.exception(\"Error while attempting to build_metric_alert_chart\")\n\n tz = settings.SENTRY_DEFAULT_TIME_ZONE\n if user is not None:\n user_option_tz = UserOption.objects.get_value(user=user, key=\"timezone\")\n if user_option_tz is not None:\n tz = user_option_tz\n\n organization = incident.organization\n return {\n \"link\": organization.absolute_url(\n reverse(\n \"sentry-metric-alert\",\n kwargs={\n \"organization_slug\": organization.slug,\n \"incident_id\": incident.identifier,\n },\n ),\n query=\"referrer=alert_email\",\n ),\n \"rule_link\": organization.absolute_url(\n reverse(\n \"sentry-alert-rule\",\n kwargs={\n \"organization_slug\": organization.slug,\n \"project_slug\": project.slug,\n \"alert_rule_id\": trigger.alert_rule_id,\n },\n )\n ),\n \"project_slug\": project.slug,\n \"incident_name\": incident.title,\n \"environment\": environment_string,\n \"time_window\": format_duration(snuba_query.time_window / 60),\n \"triggered_at\": incident.date_added,\n \"aggregate\": aggregate,\n \"query\": snuba_query.query,\n \"threshold\": threshold,\n # if alert threshold and threshold type is above then show '>'\n # if resolve threshold and threshold type is *BELOW* then show '>'\n \"threshold_direction_string\": \">\" if show_greater_than_string else \"<\",\n \"status\": INCIDENT_STATUS[incident_status],\n \"status_key\": INCIDENT_STATUS[incident_status].lower(),\n \"is_critical\": incident_status == IncidentStatus.CRITICAL,\n \"is_warning\": incident_status == IncidentStatus.WARNING,\n \"unsubscribe_link\": None,\n \"chart_url\": chart_url,\n \"timezone\": tz,\n }\n", "path": "src/sentry/incidents/action_handlers.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport abc\nimport logging\nfrom typing import Sequence, Set, Tuple\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import pluralize\nfrom django.urls import reverse\n\nfrom sentry import features\nfrom sentry.charts.types import ChartSize\nfrom sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS\nfrom sentry.incidents.charts import build_metric_alert_chart\nfrom sentry.incidents.models import (\n INCIDENT_STATUS,\n AlertRuleThresholdType,\n AlertRuleTriggerAction,\n IncidentStatus,\n TriggerStatus,\n)\nfrom sentry.models.notificationsetting import NotificationSetting\nfrom sentry.models.options.user_option import UserOption\nfrom sentry.models.rulesnooze import RuleSnooze\nfrom sentry.models.user import User\nfrom sentry.services.hybrid_cloud.user import RpcUser\nfrom sentry.types.integrations import ExternalProviders\nfrom sentry.utils import json\nfrom sentry.utils.email import MessageBuilder, get_email_addresses\n\n\nclass ActionHandler(metaclass=abc.ABCMeta):\n status_display = {TriggerStatus.ACTIVE: \"Fired\", TriggerStatus.RESOLVED: \"Resolved\"}\n\n def __init__(self, action, incident, project):\n self.action = action\n self.incident = incident\n self.project = project\n\n @abc.abstractmethod\n def fire(self, metric_value: int | float, new_status: 
IncidentStatus):\n pass\n\n @abc.abstractmethod\n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n pass\n\n\nclass DefaultActionHandler(ActionHandler):\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n if not RuleSnooze.objects.filter(\n alert_rule=self.incident.alert_rule, user_id__isnull=True\n ).exists():\n self.send_alert(metric_value, new_status)\n\n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n if not RuleSnooze.objects.filter(\n alert_rule=self.incident.alert_rule, user_id__isnull=True\n ).exists():\n self.send_alert(metric_value, new_status)\n\n @abc.abstractmethod\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n pass\n\n\[email protected]_type(\n \"email\",\n AlertRuleTriggerAction.Type.EMAIL,\n [AlertRuleTriggerAction.TargetType.USER, AlertRuleTriggerAction.TargetType.TEAM],\n)\nclass EmailActionHandler(ActionHandler):\n def _get_targets(self) -> Set[int]:\n target = self.action.target\n if not target:\n return set()\n\n if self.action.target_type == AlertRuleTriggerAction.TargetType.USER.value:\n return {target.id}\n\n elif self.action.target_type == AlertRuleTriggerAction.TargetType.TEAM.value:\n users = NotificationSetting.objects.filter_to_accepting_recipients(\n self.project,\n {RpcUser(id=member.user_id) for member in target.member_set},\n )[ExternalProviders.EMAIL]\n return {user.id for user in users}\n\n return set()\n\n def get_targets(self) -> Sequence[Tuple[int, str]]:\n return list(get_email_addresses(self._get_targets(), project=self.project).items())\n\n def fire(self, metric_value: int | float, new_status: IncidentStatus):\n self.email_users(TriggerStatus.ACTIVE, new_status)\n\n def resolve(self, metric_value: int | float, new_status: IncidentStatus):\n self.email_users(TriggerStatus.RESOLVED, new_status)\n\n def email_users(self, trigger_status: TriggerStatus, incident_status: IncidentStatus) -> None:\n for user_id, email in self.get_targets():\n user = User.objects.get_from_cache(id=user_id)\n email_context = generate_incident_trigger_email_context(\n self.project,\n self.incident,\n self.action.alert_rule_trigger,\n trigger_status,\n incident_status,\n user,\n )\n self.build_message(email_context, trigger_status, user_id).send_async(to=[email])\n\n def build_message(self, context, status, user_id):\n display = self.status_display[status]\n return MessageBuilder(\n subject=\"[{}] {} - {}\".format(\n context[\"status\"], context[\"incident_name\"], self.project.slug\n ),\n template=\"sentry/emails/incidents/trigger.txt\",\n html_template=\"sentry/emails/incidents/trigger.html\",\n type=f\"incident.alert_rule_{display.lower()}\",\n context=context,\n headers={\"X-SMTPAPI\": json.dumps({\"category\": \"metric_alert_email\"})},\n )\n\n\[email protected]_type(\n \"slack\",\n AlertRuleTriggerAction.Type.SLACK,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"slack\",\n)\nclass SlackActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.integrations.slack.utils import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"msteams\",\n AlertRuleTriggerAction.Type.MSTEAMS,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"msteams\",\n)\nclass MsTeamsActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, 
new_status: IncidentStatus):\n from sentry.integrations.msteams.utils import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"pagerduty\",\n AlertRuleTriggerAction.Type.PAGERDUTY,\n [AlertRuleTriggerAction.TargetType.SPECIFIC],\n integration_provider=\"pagerduty\",\n)\nclass PagerDutyActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.integrations.pagerduty.utils import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, metric_value, new_status)\n\n\[email protected]_type(\n \"sentry_app\",\n AlertRuleTriggerAction.Type.SENTRY_APP,\n [AlertRuleTriggerAction.TargetType.SENTRY_APP],\n)\nclass SentryAppActionHandler(DefaultActionHandler):\n def send_alert(self, metric_value: int | float, new_status: IncidentStatus):\n from sentry.rules.actions.notify_event_service import send_incident_alert_notification\n\n send_incident_alert_notification(self.action, self.incident, new_status, metric_value)\n\n\ndef format_duration(minutes):\n \"\"\"\n Format minutes into a duration string\n \"\"\"\n\n if minutes >= 1440:\n days = int(minutes // 1440)\n return f\"{days:d} day{pluralize(days)}\"\n\n if minutes >= 60:\n hours = int(minutes // 60)\n return f\"{hours:d} hour{pluralize(hours)}\"\n\n if minutes >= 1:\n minutes = int(minutes)\n return f\"{minutes:d} minute{pluralize(minutes)}\"\n\n seconds = int(minutes // 60)\n return f\"{seconds:d} second{pluralize(seconds)}\"\n\n\ndef generate_incident_trigger_email_context(\n project,\n incident,\n alert_rule_trigger,\n trigger_status,\n incident_status,\n user=None,\n):\n trigger = alert_rule_trigger\n alert_rule = trigger.alert_rule\n snuba_query = alert_rule.snuba_query\n is_active = trigger_status == TriggerStatus.ACTIVE\n is_threshold_type_above = alert_rule.threshold_type == AlertRuleThresholdType.ABOVE.value\n\n # if alert threshold and threshold type is above then show '>'\n # if resolve threshold and threshold type is *BELOW* then show '>'\n # we can simplify this to be the below statement\n show_greater_than_string = is_active == is_threshold_type_above\n environment_string = snuba_query.environment.name if snuba_query.environment else \"All\"\n\n aggregate = alert_rule.snuba_query.aggregate\n if CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:\n aggregate = aggregate.split(f\"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}\")[0].strip()\n\n threshold = trigger.alert_threshold if is_active else alert_rule.resolve_threshold\n if threshold is None:\n # Setting this to trigger threshold because in the case of a resolve if no resolve\n # threshold is specified this will be None. 
Since we add a comparison sign to the\n # string it makes sense to set this to the trigger alert threshold if no threshold is\n # specified\n threshold = trigger.alert_threshold\n\n chart_url = None\n if features.has(\"organizations:metric-alert-chartcuterie\", incident.organization):\n try:\n chart_url = build_metric_alert_chart(\n organization=incident.organization,\n alert_rule=incident.alert_rule,\n selected_incident=incident,\n size=ChartSize({\"width\": 600, \"height\": 200}),\n )\n except Exception:\n logging.exception(\"Error while attempting to build_metric_alert_chart\")\n\n tz = settings.SENTRY_DEFAULT_TIME_ZONE\n if user is not None:\n user_option_tz = UserOption.objects.get_value(user=user, key=\"timezone\")\n if user_option_tz is not None:\n tz = user_option_tz\n\n organization = incident.organization\n return {\n \"link\": organization.absolute_url(\n reverse(\n \"sentry-metric-alert\",\n kwargs={\n \"organization_slug\": organization.slug,\n \"incident_id\": incident.identifier,\n },\n ),\n query=\"referrer=alert_email\",\n ),\n \"rule_link\": organization.absolute_url(\n reverse(\n \"sentry-alert-rule\",\n kwargs={\n \"organization_slug\": organization.slug,\n \"project_slug\": project.slug,\n \"alert_rule_id\": trigger.alert_rule_id,\n },\n )\n ),\n \"project_slug\": project.slug,\n \"incident_name\": incident.title,\n \"environment\": environment_string,\n \"time_window\": format_duration(snuba_query.time_window / 60),\n \"triggered_at\": incident.date_added,\n \"aggregate\": aggregate,\n \"query\": snuba_query.query,\n \"threshold\": threshold,\n # if alert threshold and threshold type is above then show '>'\n # if resolve threshold and threshold type is *BELOW* then show '>'\n \"threshold_direction_string\": \">\" if show_greater_than_string else \"<\",\n \"status\": INCIDENT_STATUS[incident_status],\n \"status_key\": INCIDENT_STATUS[incident_status].lower(),\n \"is_critical\": incident_status == IncidentStatus.CRITICAL,\n \"is_warning\": incident_status == IncidentStatus.WARNING,\n \"unsubscribe_link\": None,\n \"chart_url\": chart_url,\n \"timezone\": tz,\n }\n", "path": "src/sentry/incidents/action_handlers.py"}]} | 3,370 | 318 |
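The guard introduced in the diff above reduces to a single ORM existence check performed before any alert is dispatched. A minimal standalone sketch of that pattern, assuming the `RuleSnooze` model and its `alert_rule` and `user_id` fields behave as shown in the diff:

```python
from sentry.models.rulesnooze import RuleSnooze


def should_send_metric_alert(alert_rule) -> bool:
    # A rule muted "for everyone" is stored as a RuleSnooze row with no user_id;
    # in that case integration notifications for the rule are suppressed entirely.
    return not RuleSnooze.objects.filter(
        alert_rule=alert_rule, user_id__isnull=True
    ).exists()
```

Both `fire` and `resolve` in `DefaultActionHandler` apply the same check, so a rule snoozed for everyone silences the firing notification and the resolution notification alike.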
gh_patches_debug_31861 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSpeech-28 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need to adapt demo_server.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deploy/demo_server.py`
Content:
```
1 """Server-end for the ASR demo."""
2 import os
3 import time
4 import random
5 import argparse
6 import functools
7 from time import gmtime, strftime
8 import SocketServer
9 import struct
10 import wave
11 import paddle.v2 as paddle
12 import _init_paths
13 from data_utils.data import DataGenerator
14 from model_utils.model import DeepSpeech2Model
15 from data_utils.utility import read_manifest
16 from utils.utility import add_arguments, print_arguments
17
18 parser = argparse.ArgumentParser(description=__doc__)
19 add_arg = functools.partial(add_arguments, argparser=parser)
20 # yapf: disable
21 add_arg('host_port', int, 8086, "Server's IP port.")
22 add_arg('beam_size', int, 500, "Beam search width.")
23 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
24 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
25 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
26 add_arg('alpha', float, 2.15, "Coef of LM for beam search.")
27 add_arg('beta', float, 0.35, "Coef of WC for beam search.")
28 add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
29 add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
30 add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
31 add_arg('use_gpu', bool, True, "Use GPU or not.")
32 add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
33 "bi-directional RNNs. Not for GRU.")
34 add_arg('host_ip', str,
35 'localhost',
36 "Server's IP address.")
37 add_arg('speech_save_dir', str,
38 'demo_cache',
39 "Directory to save demo audios.")
40 add_arg('warmup_manifest', str,
41 'data/librispeech/manifest.test-clean',
42 "Filepath of manifest to warm up.")
43 add_arg('mean_std_path', str,
44 'data/librispeech/mean_std.npz',
45 "Filepath of normalizer's mean & std.")
46 add_arg('vocab_path', str,
47 'data/librispeech/eng_vocab.txt',
48 "Filepath of vocabulary.")
49 add_arg('model_path', str,
50 './checkpoints/libri/params.latest.tar.gz',
51 "If None, the training starts from scratch, "
52 "otherwise, it resumes from the pre-trained model.")
53 add_arg('lang_model_path', str,
54 'lm/data/common_crawl_00.prune01111.trie.klm',
55 "Filepath for language model.")
56 add_arg('decoding_method', str,
57 'ctc_beam_search',
58 "Decoding method. Options: ctc_beam_search, ctc_greedy",
59 choices = ['ctc_beam_search', 'ctc_greedy'])
60 add_arg('specgram_type', str,
61 'linear',
62 "Audio feature type. Options: linear, mfcc.",
63 choices=['linear', 'mfcc'])
64 # yapf: disable
65 args = parser.parse_args()
66
67
68 class AsrTCPServer(SocketServer.TCPServer):
69 """The ASR TCP Server."""
70
71 def __init__(self,
72 server_address,
73 RequestHandlerClass,
74 speech_save_dir,
75 audio_process_handler,
76 bind_and_activate=True):
77 self.speech_save_dir = speech_save_dir
78 self.audio_process_handler = audio_process_handler
79 SocketServer.TCPServer.__init__(
80 self, server_address, RequestHandlerClass, bind_and_activate=True)
81
82
83 class AsrRequestHandler(SocketServer.BaseRequestHandler):
84 """The ASR request handler."""
85
86 def handle(self):
87 # receive data through TCP socket
88 chunk = self.request.recv(1024)
89 target_len = struct.unpack('>i', chunk[:4])[0]
90 data = chunk[4:]
91 while len(data) < target_len:
92 chunk = self.request.recv(1024)
93 data += chunk
94 # write to file
95 filename = self._write_to_file(data)
96
97 print("Received utterance[length=%d] from %s, saved to %s." %
98 (len(data), self.client_address[0], filename))
99 start_time = time.time()
100 transcript = self.server.audio_process_handler(filename)
101 finish_time = time.time()
102 print("Response Time: %f, Transcript: %s" %
103 (finish_time - start_time, transcript))
104 self.request.sendall(transcript.encode('utf-8'))
105
106 def _write_to_file(self, data):
107 # prepare save dir and filename
108 if not os.path.exists(self.server.speech_save_dir):
109 os.mkdir(self.server.speech_save_dir)
110 timestamp = strftime("%Y%m%d%H%M%S", gmtime())
111 out_filename = os.path.join(
112 self.server.speech_save_dir,
113 timestamp + "_" + self.client_address[0] + ".wav")
114 # write to wav file
115 file = wave.open(out_filename, 'wb')
116 file.setnchannels(1)
117 file.setsampwidth(4)
118 file.setframerate(16000)
119 file.writeframes(data)
120 file.close()
121 return out_filename
122
123
124 def warm_up_test(audio_process_handler,
125 manifest_path,
126 num_test_cases,
127 random_seed=0):
128 """Warming-up test."""
129 manifest = read_manifest(manifest_path)
130 rng = random.Random(random_seed)
131 samples = rng.sample(manifest, num_test_cases)
132 for idx, sample in enumerate(samples):
133 print("Warm-up Test Case %d: %s", idx, sample['audio_filepath'])
134 start_time = time.time()
135 transcript = audio_process_handler(sample['audio_filepath'])
136 finish_time = time.time()
137 print("Response Time: %f, Transcript: %s" %
138 (finish_time - start_time, transcript))
139
140
141 def start_server():
142 """Start the ASR server"""
143 # prepare data generator
144 data_generator = DataGenerator(
145 vocab_filepath=args.vocab_path,
146 mean_std_filepath=args.mean_std_path,
147 augmentation_config='{}',
148 specgram_type=args.specgram_type,
149 num_threads=1,
150 keep_transcription_text=True)
151 # prepare ASR model
152 ds2_model = DeepSpeech2Model(
153 vocab_size=data_generator.vocab_size,
154 num_conv_layers=args.num_conv_layers,
155 num_rnn_layers=args.num_rnn_layers,
156 rnn_layer_size=args.rnn_layer_size,
157 use_gru=args.use_gru,
158 pretrained_model_path=args.model_path,
159 share_rnn_weights=args.share_rnn_weights)
160
161 vocab_list = [chars.encode("utf-8") for chars in data_generator.vocab_list]
162
163 # prepare ASR inference handler
164 def file_to_transcript(filename):
165 feature = data_generator.process_utterance(filename, "")
166 result_transcript = ds2_model.infer_batch(
167 infer_data=[feature],
168 decoding_method=args.decoding_method,
169 beam_alpha=args.alpha,
170 beam_beta=args.beta,
171 beam_size=args.beam_size,
172 cutoff_prob=args.cutoff_prob,
173 cutoff_top_n=args.cutoff_top_n,
174 vocab_list=vocab_list,
175 language_model_path=args.lang_model_path,
176 num_processes=1)
177 return result_transcript[0]
178
179 # warming up with utterrances sampled from Librispeech
180 print('-----------------------------------------------------------')
181 print('Warming up ...')
182 warm_up_test(
183 audio_process_handler=file_to_transcript,
184 manifest_path=args.warmup_manifest,
185 num_test_cases=3)
186 print('-----------------------------------------------------------')
187
188 # start the server
189 server = AsrTCPServer(
190 server_address=(args.host_ip, args.host_port),
191 RequestHandlerClass=AsrRequestHandler,
192 speech_save_dir=args.speech_save_dir,
193 audio_process_handler=file_to_transcript)
194 print("ASR Server Started.")
195 server.serve_forever()
196
197
198 def main():
199 print_arguments(args)
200 paddle.init(use_gpu=args.use_gpu, trainer_count=1)
201 start_server()
202
203
204 if __name__ == "__main__":
205 main()
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deploy/demo_server.py b/deploy/demo_server.py
--- a/deploy/demo_server.py
+++ b/deploy/demo_server.py
@@ -147,7 +147,8 @@
augmentation_config='{}',
specgram_type=args.specgram_type,
num_threads=1,
- keep_transcription_text=True)
+ keep_transcription_text=True,
+ num_conv_layers=args.num_conv_layers)
# prepare ASR model
ds2_model = DeepSpeech2Model(
vocab_size=data_generator.vocab_size,
@@ -163,8 +164,20 @@
# prepare ASR inference handler
def file_to_transcript(filename):
feature = data_generator.process_utterance(filename, "")
+ ins = []
+ conv0_h = (feature[0].shape[0] - 1) // 2 + 1
+ conv0_w = (feature[0].shape[1] - 1) // 3 + 1
+ ins += [feature[0], feature[1],
+ [0], [conv0_w],
+ [1, 32, 1, conv0_h, conv0_w + 1, conv0_w]]
+ pre_h = conv0_h
+ for i in xrange(args.num_conv_layers - 1):
+ h = (pre_h - 1) // 2 + 1
+ pre_h = h
+ ins += [[1, 32, 1, h, conv0_w + 1, conv0_w]]
+
result_transcript = ds2_model.infer_batch(
- infer_data=[feature],
+ infer_data=[ins],
decoding_method=args.decoding_method,
beam_alpha=args.alpha,
beam_beta=args.beta,
@@ -173,7 +186,8 @@
cutoff_top_n=args.cutoff_top_n,
vocab_list=vocab_list,
language_model_path=args.lang_model_path,
- num_processes=1)
+ num_processes=1,
+ feeding_dict=data_generator.feeding)
return result_transcript[0]
# warming up with utterrances sampled from Librispeech
| {"golden_diff": "diff --git a/deploy/demo_server.py b/deploy/demo_server.py\n--- a/deploy/demo_server.py\n+++ b/deploy/demo_server.py\n@@ -147,7 +147,8 @@\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=1,\n- keep_transcription_text=True)\n+ keep_transcription_text=True,\n+ num_conv_layers=args.num_conv_layers)\n # prepare ASR model\n ds2_model = DeepSpeech2Model(\n vocab_size=data_generator.vocab_size,\n@@ -163,8 +164,20 @@\n # prepare ASR inference handler\n def file_to_transcript(filename):\n feature = data_generator.process_utterance(filename, \"\")\n+ ins = []\n+ conv0_h = (feature[0].shape[0] - 1) // 2 + 1\n+ conv0_w = (feature[0].shape[1] - 1) // 3 + 1\n+ ins += [feature[0], feature[1],\n+ [0], [conv0_w],\n+ [1, 32, 1, conv0_h, conv0_w + 1, conv0_w]]\n+ pre_h = conv0_h\n+ for i in xrange(args.num_conv_layers - 1):\n+ h = (pre_h - 1) // 2 + 1\n+ pre_h = h\n+ ins += [[1, 32, 1, h, conv0_w + 1, conv0_w]]\n+\n result_transcript = ds2_model.infer_batch(\n- infer_data=[feature],\n+ infer_data=[ins],\n decoding_method=args.decoding_method,\n beam_alpha=args.alpha,\n beam_beta=args.beta,\n@@ -173,7 +186,8 @@\n cutoff_top_n=args.cutoff_top_n,\n vocab_list=vocab_list,\n language_model_path=args.lang_model_path,\n- num_processes=1)\n+ num_processes=1,\n+ feeding_dict=data_generator.feeding)\n return result_transcript[0]\n \n # warming up with utterrances sampled from Librispeech\n", "issue": "Need adapt demo_server.py\n\n", "before_files": [{"content": "\"\"\"Server-end for the ASR demo.\"\"\"\nimport os\nimport time\nimport random\nimport argparse\nimport functools\nfrom time import gmtime, strftime\nimport SocketServer\nimport struct\nimport wave\nimport paddle.v2 as paddle\nimport _init_paths\nfrom data_utils.data import DataGenerator\nfrom model_utils.model import DeepSpeech2Model\nfrom data_utils.utility import read_manifest\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('host_port', int, 8086, \"Server's IP port.\")\nadd_arg('beam_size', int, 500, \"Beam search width.\")\nadd_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\nadd_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\nadd_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\nadd_arg('alpha', float, 2.15, \"Coef of LM for beam search.\")\nadd_arg('beta', float, 0.35, \"Coef of WC for beam search.\")\nadd_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\nadd_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\nadd_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\nadd_arg('use_gpu', bool, True, \"Use GPU or not.\")\nadd_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n \"bi-directional RNNs. 
Not for GRU.\")\nadd_arg('host_ip', str,\n 'localhost',\n \"Server's IP address.\")\nadd_arg('speech_save_dir', str,\n 'demo_cache',\n \"Directory to save demo audios.\")\nadd_arg('warmup_manifest', str,\n 'data/librispeech/manifest.test-clean',\n \"Filepath of manifest to warm up.\")\nadd_arg('mean_std_path', str,\n 'data/librispeech/mean_std.npz',\n \"Filepath of normalizer's mean & std.\")\nadd_arg('vocab_path', str,\n 'data/librispeech/eng_vocab.txt',\n \"Filepath of vocabulary.\")\nadd_arg('model_path', str,\n './checkpoints/libri/params.latest.tar.gz',\n \"If None, the training starts from scratch, \"\n \"otherwise, it resumes from the pre-trained model.\")\nadd_arg('lang_model_path', str,\n 'lm/data/common_crawl_00.prune01111.trie.klm',\n \"Filepath for language model.\")\nadd_arg('decoding_method', str,\n 'ctc_beam_search',\n \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n choices = ['ctc_beam_search', 'ctc_greedy'])\nadd_arg('specgram_type', str,\n 'linear',\n \"Audio feature type. Options: linear, mfcc.\",\n choices=['linear', 'mfcc'])\n# yapf: disable\nargs = parser.parse_args()\n\n\nclass AsrTCPServer(SocketServer.TCPServer):\n \"\"\"The ASR TCP Server.\"\"\"\n\n def __init__(self,\n server_address,\n RequestHandlerClass,\n speech_save_dir,\n audio_process_handler,\n bind_and_activate=True):\n self.speech_save_dir = speech_save_dir\n self.audio_process_handler = audio_process_handler\n SocketServer.TCPServer.__init__(\n self, server_address, RequestHandlerClass, bind_and_activate=True)\n\n\nclass AsrRequestHandler(SocketServer.BaseRequestHandler):\n \"\"\"The ASR request handler.\"\"\"\n\n def handle(self):\n # receive data through TCP socket\n chunk = self.request.recv(1024)\n target_len = struct.unpack('>i', chunk[:4])[0]\n data = chunk[4:]\n while len(data) < target_len:\n chunk = self.request.recv(1024)\n data += chunk\n # write to file\n filename = self._write_to_file(data)\n\n print(\"Received utterance[length=%d] from %s, saved to %s.\" %\n (len(data), self.client_address[0], filename))\n start_time = time.time()\n transcript = self.server.audio_process_handler(filename)\n finish_time = time.time()\n print(\"Response Time: %f, Transcript: %s\" %\n (finish_time - start_time, transcript))\n self.request.sendall(transcript.encode('utf-8'))\n\n def _write_to_file(self, data):\n # prepare save dir and filename\n if not os.path.exists(self.server.speech_save_dir):\n os.mkdir(self.server.speech_save_dir)\n timestamp = strftime(\"%Y%m%d%H%M%S\", gmtime())\n out_filename = os.path.join(\n self.server.speech_save_dir,\n timestamp + \"_\" + self.client_address[0] + \".wav\")\n # write to wav file\n file = wave.open(out_filename, 'wb')\n file.setnchannels(1)\n file.setsampwidth(4)\n file.setframerate(16000)\n file.writeframes(data)\n file.close()\n return out_filename\n\n\ndef warm_up_test(audio_process_handler,\n manifest_path,\n num_test_cases,\n random_seed=0):\n \"\"\"Warming-up test.\"\"\"\n manifest = read_manifest(manifest_path)\n rng = random.Random(random_seed)\n samples = rng.sample(manifest, num_test_cases)\n for idx, sample in enumerate(samples):\n print(\"Warm-up Test Case %d: %s\", idx, sample['audio_filepath'])\n start_time = time.time()\n transcript = audio_process_handler(sample['audio_filepath'])\n finish_time = time.time()\n print(\"Response Time: %f, Transcript: %s\" %\n (finish_time - start_time, transcript))\n\n\ndef start_server():\n \"\"\"Start the ASR server\"\"\"\n # prepare data generator\n data_generator = DataGenerator(\n 
vocab_filepath=args.vocab_path,\n mean_std_filepath=args.mean_std_path,\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=1,\n keep_transcription_text=True)\n # prepare ASR model\n ds2_model = DeepSpeech2Model(\n vocab_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n rnn_layer_size=args.rnn_layer_size,\n use_gru=args.use_gru,\n pretrained_model_path=args.model_path,\n share_rnn_weights=args.share_rnn_weights)\n\n vocab_list = [chars.encode(\"utf-8\") for chars in data_generator.vocab_list]\n\n # prepare ASR inference handler\n def file_to_transcript(filename):\n feature = data_generator.process_utterance(filename, \"\")\n result_transcript = ds2_model.infer_batch(\n infer_data=[feature],\n decoding_method=args.decoding_method,\n beam_alpha=args.alpha,\n beam_beta=args.beta,\n beam_size=args.beam_size,\n cutoff_prob=args.cutoff_prob,\n cutoff_top_n=args.cutoff_top_n,\n vocab_list=vocab_list,\n language_model_path=args.lang_model_path,\n num_processes=1)\n return result_transcript[0]\n\n # warming up with utterrances sampled from Librispeech\n print('-----------------------------------------------------------')\n print('Warming up ...')\n warm_up_test(\n audio_process_handler=file_to_transcript,\n manifest_path=args.warmup_manifest,\n num_test_cases=3)\n print('-----------------------------------------------------------')\n\n # start the server\n server = AsrTCPServer(\n server_address=(args.host_ip, args.host_port),\n RequestHandlerClass=AsrRequestHandler,\n speech_save_dir=args.speech_save_dir,\n audio_process_handler=file_to_transcript)\n print(\"ASR Server Started.\")\n server.serve_forever()\n\n\ndef main():\n print_arguments(args)\n paddle.init(use_gpu=args.use_gpu, trainer_count=1)\n start_server()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "deploy/demo_server.py"}], "after_files": [{"content": "\"\"\"Server-end for the ASR demo.\"\"\"\nimport os\nimport time\nimport random\nimport argparse\nimport functools\nfrom time import gmtime, strftime\nimport SocketServer\nimport struct\nimport wave\nimport paddle.v2 as paddle\nimport _init_paths\nfrom data_utils.data import DataGenerator\nfrom model_utils.model import DeepSpeech2Model\nfrom data_utils.utility import read_manifest\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('host_port', int, 8086, \"Server's IP port.\")\nadd_arg('beam_size', int, 500, \"Beam search width.\")\nadd_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\nadd_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\nadd_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\nadd_arg('alpha', float, 2.15, \"Coef of LM for beam search.\")\nadd_arg('beta', float, 0.35, \"Coef of WC for beam search.\")\nadd_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\nadd_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\nadd_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\nadd_arg('use_gpu', bool, True, \"Use GPU or not.\")\nadd_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n \"bi-directional RNNs. 
Not for GRU.\")\nadd_arg('host_ip', str,\n 'localhost',\n \"Server's IP address.\")\nadd_arg('speech_save_dir', str,\n 'demo_cache',\n \"Directory to save demo audios.\")\nadd_arg('warmup_manifest', str,\n 'data/librispeech/manifest.test-clean',\n \"Filepath of manifest to warm up.\")\nadd_arg('mean_std_path', str,\n 'data/librispeech/mean_std.npz',\n \"Filepath of normalizer's mean & std.\")\nadd_arg('vocab_path', str,\n 'data/librispeech/eng_vocab.txt',\n \"Filepath of vocabulary.\")\nadd_arg('model_path', str,\n './checkpoints/libri/params.latest.tar.gz',\n \"If None, the training starts from scratch, \"\n \"otherwise, it resumes from the pre-trained model.\")\nadd_arg('lang_model_path', str,\n 'lm/data/common_crawl_00.prune01111.trie.klm',\n \"Filepath for language model.\")\nadd_arg('decoding_method', str,\n 'ctc_beam_search',\n \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n choices = ['ctc_beam_search', 'ctc_greedy'])\nadd_arg('specgram_type', str,\n 'linear',\n \"Audio feature type. Options: linear, mfcc.\",\n choices=['linear', 'mfcc'])\n# yapf: disable\nargs = parser.parse_args()\n\n\nclass AsrTCPServer(SocketServer.TCPServer):\n \"\"\"The ASR TCP Server.\"\"\"\n\n def __init__(self,\n server_address,\n RequestHandlerClass,\n speech_save_dir,\n audio_process_handler,\n bind_and_activate=True):\n self.speech_save_dir = speech_save_dir\n self.audio_process_handler = audio_process_handler\n SocketServer.TCPServer.__init__(\n self, server_address, RequestHandlerClass, bind_and_activate=True)\n\n\nclass AsrRequestHandler(SocketServer.BaseRequestHandler):\n \"\"\"The ASR request handler.\"\"\"\n\n def handle(self):\n # receive data through TCP socket\n chunk = self.request.recv(1024)\n target_len = struct.unpack('>i', chunk[:4])[0]\n data = chunk[4:]\n while len(data) < target_len:\n chunk = self.request.recv(1024)\n data += chunk\n # write to file\n filename = self._write_to_file(data)\n\n print(\"Received utterance[length=%d] from %s, saved to %s.\" %\n (len(data), self.client_address[0], filename))\n start_time = time.time()\n transcript = self.server.audio_process_handler(filename)\n finish_time = time.time()\n print(\"Response Time: %f, Transcript: %s\" %\n (finish_time - start_time, transcript))\n self.request.sendall(transcript.encode('utf-8'))\n\n def _write_to_file(self, data):\n # prepare save dir and filename\n if not os.path.exists(self.server.speech_save_dir):\n os.mkdir(self.server.speech_save_dir)\n timestamp = strftime(\"%Y%m%d%H%M%S\", gmtime())\n out_filename = os.path.join(\n self.server.speech_save_dir,\n timestamp + \"_\" + self.client_address[0] + \".wav\")\n # write to wav file\n file = wave.open(out_filename, 'wb')\n file.setnchannels(1)\n file.setsampwidth(4)\n file.setframerate(16000)\n file.writeframes(data)\n file.close()\n return out_filename\n\n\ndef warm_up_test(audio_process_handler,\n manifest_path,\n num_test_cases,\n random_seed=0):\n \"\"\"Warming-up test.\"\"\"\n manifest = read_manifest(manifest_path)\n rng = random.Random(random_seed)\n samples = rng.sample(manifest, num_test_cases)\n for idx, sample in enumerate(samples):\n print(\"Warm-up Test Case %d: %s\", idx, sample['audio_filepath'])\n start_time = time.time()\n transcript = audio_process_handler(sample['audio_filepath'])\n finish_time = time.time()\n print(\"Response Time: %f, Transcript: %s\" %\n (finish_time - start_time, transcript))\n\n\ndef start_server():\n \"\"\"Start the ASR server\"\"\"\n # prepare data generator\n data_generator = DataGenerator(\n 
vocab_filepath=args.vocab_path,\n mean_std_filepath=args.mean_std_path,\n augmentation_config='{}',\n specgram_type=args.specgram_type,\n num_threads=1,\n keep_transcription_text=True,\n num_conv_layers=args.num_conv_layers)\n # prepare ASR model\n ds2_model = DeepSpeech2Model(\n vocab_size=data_generator.vocab_size,\n num_conv_layers=args.num_conv_layers,\n num_rnn_layers=args.num_rnn_layers,\n rnn_layer_size=args.rnn_layer_size,\n use_gru=args.use_gru,\n pretrained_model_path=args.model_path,\n share_rnn_weights=args.share_rnn_weights)\n\n vocab_list = [chars.encode(\"utf-8\") for chars in data_generator.vocab_list]\n\n # prepare ASR inference handler\n def file_to_transcript(filename):\n feature = data_generator.process_utterance(filename, \"\")\n ins = []\n conv0_h = (feature[0].shape[0] - 1) // 2 + 1\n conv0_w = (feature[0].shape[1] - 1) // 3 + 1\n ins += [feature[0], feature[1],\n [0], [conv0_w],\n [1, 32, 1, conv0_h, conv0_w + 1, conv0_w]]\n pre_h = conv0_h\n for i in xrange(args.num_conv_layers - 1):\n h = (pre_h - 1) // 2 + 1\n pre_h = h\n ins += [[1, 32, 1, h, conv0_w + 1, conv0_w]]\n\n result_transcript = ds2_model.infer_batch(\n infer_data=[ins],\n decoding_method=args.decoding_method,\n beam_alpha=args.alpha,\n beam_beta=args.beta,\n beam_size=args.beam_size,\n cutoff_prob=args.cutoff_prob,\n cutoff_top_n=args.cutoff_top_n,\n vocab_list=vocab_list,\n language_model_path=args.lang_model_path,\n num_processes=1,\n feeding_dict=data_generator.feeding)\n return result_transcript[0]\n\n # warming up with utterrances sampled from Librispeech\n print('-----------------------------------------------------------')\n print('Warming up ...')\n warm_up_test(\n audio_process_handler=file_to_transcript,\n manifest_path=args.warmup_manifest,\n num_test_cases=3)\n print('-----------------------------------------------------------')\n\n # start the server\n server = AsrTCPServer(\n server_address=(args.host_ip, args.host_port),\n RequestHandlerClass=AsrRequestHandler,\n speech_save_dir=args.speech_save_dir,\n audio_process_handler=file_to_transcript)\n print(\"ASR Server Started.\")\n server.serve_forever()\n\n\ndef main():\n print_arguments(args)\n paddle.init(use_gpu=args.use_gpu, trainer_count=1)\n start_server()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "deploy/demo_server.py"}]} | 2,567 | 475 |
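The shape arithmetic in the patch above follows a simple strided-convolution size formula. A small sketch of that calculation in isolation (the stride of 2 along height on every layer and 3 along width on the first layer mirrors what the diff assumes about the DeepSpeech2 convolution stack):

```python
def conv_output_dims(feat_h, feat_w, num_conv_layers):
    # First convolution: stride 2 on the height axis, stride 3 on the width axis.
    h = (feat_h - 1) // 2 + 1
    w = (feat_w - 1) // 3 + 1
    dims = [(h, w)]
    # Each remaining layer halves the height again; the width stays fixed.
    for _ in range(num_conv_layers - 1):
        h = (h - 1) // 2 + 1
        dims.append((h, w))
    return dims


# e.g. a 161 x 200 spectrogram through 2 conv layers -> [(81, 67), (41, 67)]
print(conv_output_dims(161, 200, 2))
```

These per-layer dimensions are what the patched `file_to_transcript` packs into `ins` alongside the raw feature, so that the inference inputs line up with the data generator's feeding dictionary.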
gh_patches_debug_2530 | rasdani/github-patches | git_diff | cloudtools__troposphere-1683 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ApiGatewayV2 is missing VpcLink
We appear to be unable to create an HTTP Vpc Link, as the ApiGatewayV2 Vpc Link is not yet implemented.
From the AWS documentation:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-vpclink.html
From the Troposphere codebase, VpcLink is missing.
https://github.com/cloudtools/troposphere/blob/master/troposphere/apigatewayv2.py
Please note that this is not the same thing as the ApiGateway VpcLink, as that does not support Http Vpc Links.
--- END ISSUE ---
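For context, a minimal sketch of how the missing resource might be declared once it exists; the class name and property names below are assumptions taken from the AWS documentation linked above rather than from the current troposphere API:

```python
from troposphere import Template
from troposphere.apigatewayv2 import VpcLink  # hypothetical import; not yet available

template = Template()
template.add_resource(VpcLink(
    "HttpApiVpcLink",
    Name="my-http-vpc-link",
    SubnetIds=["subnet-0123456789abcdef0"],      # required per AWS::ApiGatewayV2::VpcLink
    SecurityGroupIds=["sg-0123456789abcdef0"],   # optional
    Tags={"Project": "example"},
))
```

This is the HTTP-API flavour of a VPC link; the existing `troposphere.apigateway.VpcLink` targets REST APIs and takes different properties (notably `TargetArns`), which is why it cannot be reused here.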
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/apigatewayv2.py`
Content:
```
1 # Copyright (c) 2012-2019, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5 #
6 # *** Do not modify - this file is autogenerated ***
7 # Resource specification version: 10.0.0
8
9
10 from . import AWSObject
11 from . import AWSProperty
12 from .validators import boolean
13 from .validators import integer
14 from .validators import integer_range
15 from .validators import positive_integer
16 from .validators import double
17 from .validators import json_checker
18
19
20 def validate_integration_type(integration_type):
21
22 valid_integration_types = [
23 "AWS",
24 "AWS_PROXY",
25 "HTTP",
26 "HTTP_PROXY",
27 "MOCK"
28 ]
29 if integration_type not in valid_integration_types:
30 raise ValueError(
31 "{} is not a valid IntegrationType".format(
32 integration_type)
33 )
34 return integration_type
35
36
37 def validate_authorizer_type(authorizer_type):
38
39 valid_authorizer_types = [
40 "REQUEST"
41 ]
42 if authorizer_type not in valid_authorizer_types:
43 raise ValueError(
44 "{} is not a valid AuthorizerType".format(
45 authorizer_type)
46 )
47 return authorizer_type
48
49
50 def validate_logging_level(logging_level):
51
52 valid_logging_levels = [
53 "WARN",
54 "INFO",
55 "DEBUG"
56 ]
57 if logging_level not in valid_logging_levels:
58 raise ValueError(
59 "{} is not a valid LoggingLevel".format(
60 logging_level)
61 )
62 return logging_level
63
64
65 def validate_passthrough_behavior(passthrough_behavior):
66
67 valid_passthrough_behaviors = [
68 "WHEN_NO_MATCH",
69 "WHEN_NO_TEMPLATES",
70 "NEVER"
71 ]
72 if passthrough_behavior not in valid_passthrough_behaviors:
73 raise ValueError(
74 "{} is not a valid PassthroughBehavior".format(
75 passthrough_behavior)
76 )
77 return passthrough_behavior
78
79
80 def validate_content_handling_strategy(content_handling_strategy):
81
82 valid_handling_strategy_values = [
83 "CONVERT_TO_TEXT",
84 "CONVERT_TO_BINARY"
85 ]
86 if content_handling_strategy not in valid_handling_strategy_values:
87 raise ValueError(
88 "{} is not a valid ContentHandlingStrategy".format(
89 content_handling_strategy)
90 )
91 return content_handling_strategy
92
93
94 def validate_authorizer_ttl(ttl_value):
95 """ Validate authorizer ttl timeout
96 :param ttl_value: The TTL timeout in seconds
97 :return: The provided TTL value if valid
98 """
99 ttl_value = int(positive_integer(ttl_value))
100 if ttl_value > 3600:
101 raise ValueError("The AuthorizerResultTtlInSeconds should be <= 3600")
102 return ttl_value
103
104
105 class BodyS3Location(AWSProperty):
106 props = {
107 'Bucket': (basestring, False),
108 'Etag': (basestring, False),
109 'Key': (basestring, False),
110 'Version': (basestring, False),
111 }
112
113
114 class Cors(AWSProperty):
115 props = {
116 'AllowCredentials': (boolean, False),
117 'AllowHeaders': ([basestring], False),
118 'AllowMethods': ([basestring], False),
119 'AllowOrigins': ([basestring], False),
120 'ExposeHeaders': ([basestring], False),
121 'MaxAge': (integer, False),
122 }
123
124
125 class Api(AWSObject):
126 resource_type = "AWS::ApiGatewayV2::Api"
127
128 props = {
129 'ApiKeySelectionExpression': (basestring, False),
130 'BasePath': (basestring, False),
131 'Body': (dict, False),
132 'BodyS3Location': (BodyS3Location, False),
133 'CorsConfiguration': (Cors, False),
134 'CredentialsArn': (basestring, False),
135 'Description': (basestring, False),
136 'DisableSchemaValidation': (boolean, False),
137 'FailOnWarnings': (boolean, False),
138 'Name': (basestring, False),
139 'ProtocolType': (basestring, False),
140 'RouteKey': (basestring, False),
141 'RouteSelectionExpression': (basestring, False),
142 'Tags': (dict, False),
143 'Target': (basestring, False),
144 'Version': (basestring, False),
145 }
146
147
148 class ApiMapping(AWSObject):
149 resource_type = "AWS::ApiGatewayV2::ApiMapping"
150
151 props = {
152 'ApiId': (basestring, True),
153 'ApiMappingKey': (basestring, False),
154 'DomainName': (basestring, True),
155 'Stage': (basestring, True),
156 }
157
158
159 class JWTConfiguration(AWSProperty):
160 props = {
161 'Audience': ([basestring], False),
162 'Issuer': (basestring, False),
163 }
164
165
166 class Authorizer(AWSObject):
167 resource_type = "AWS::ApiGatewayV2::Authorizer"
168
169 props = {
170 'ApiId': (basestring, True),
171 'AuthorizerCredentialsArn': (basestring, False),
172 'AuthorizerResultTtlInSeconds': (validate_authorizer_ttl, False),
173 'AuthorizerType': (validate_authorizer_type, True),
174 'AuthorizerUri': (basestring, False),
175 'IdentitySource': ([basestring], True),
176 'IdentityValidationExpression': (basestring, False),
177 'JwtConfiguration': (JWTConfiguration, False),
178 'Name': (basestring, True),
179 }
180
181
182 class Deployment(AWSObject):
183 resource_type = "AWS::ApiGatewayV2::Deployment"
184
185 props = {
186 'ApiId': (basestring, True),
187 'Description': (basestring, False),
188 'StageName': (basestring, False),
189 }
190
191
192 class DomainNameConfiguration(AWSProperty):
193 props = {
194 'CertificateArn': (basestring, False),
195 'CertificateName': (basestring, False),
196 'EndpointType': (basestring, False),
197 }
198
199
200 class DomainName(AWSObject):
201 resource_type = "AWS::ApiGatewayV2::DomainName"
202
203 props = {
204 'DomainName': (basestring, True),
205 'DomainNameConfigurations': ([DomainNameConfiguration], False),
206 'Tags': (dict, False),
207 }
208
209
210 class TlsConfig(AWSProperty):
211 props = {
212 'ServerNameToVerify': (basestring, False),
213 }
214
215
216 class Integration(AWSObject):
217 resource_type = "AWS::ApiGatewayV2::Integration"
218
219 props = {
220 'ApiId': (basestring, True),
221 'ConnectionType': (basestring, False),
222 'ConnectionId': (basestring, False),
223 'ContentHandlingStrategy': (validate_content_handling_strategy, False),
224 'CredentialsArn': (basestring, False),
225 'Description': (basestring, False),
226 'IntegrationMethod': (basestring, False),
227 'IntegrationType': (validate_integration_type, True),
228 'IntegrationUri': (basestring, False),
229 'PassthroughBehavior': (validate_passthrough_behavior, False),
230 'PayloadFormatVersion': (basestring, False),
231 'RequestParameters': (dict, False),
232 'RequestTemplates': (dict, False),
233 'TemplateSelectionExpression': (basestring, False),
234 'TimeoutInMillis': (integer_range(50, 29000), False),
235 'TlsConfig': (TlsConfig, False),
236 }
237
238
239 class IntegrationResponse(AWSObject):
240 resource_type = "AWS::ApiGatewayV2::IntegrationResponse"
241
242 props = {
243 'ApiId': (basestring, True),
244 'ContentHandlingStrategy': (validate_content_handling_strategy, False),
245 'IntegrationId': (basestring, True),
246 'IntegrationResponseKey': (basestring, True),
247 'ResponseParameters': (dict, False),
248 'ResponseTemplates': (dict, False),
249 'TemplateSelectionExpression': (basestring, False),
250 }
251
252
253 class Model(AWSObject):
254 resource_type = "AWS::ApiGatewayV2::Model"
255
256 props = {
257 'ApiId': (basestring, True),
258 'ContentType': (basestring, False),
259 'Description': (basestring, False),
260 'Name': (basestring, True),
261 'Schema': ((basestring, dict), True),
262 }
263
264 def validate(self):
265 name = 'Schema'
266 if name in self.properties:
267 schema = self.properties.get(name)
268 self.properties[name] = json_checker(schema)
269
270
271 class Route(AWSObject):
272 resource_type = "AWS::ApiGatewayV2::Route"
273
274 props = {
275 'ApiId': (basestring, True),
276 'ApiKeyRequired': (boolean, False),
277 'AuthorizationScopes': ([basestring], False),
278 'AuthorizationType': (basestring, False),
279 'AuthorizerId': (basestring, False),
280 'ModelSelectionExpression': (basestring, False),
281 'OperationName': (basestring, False),
282 'RequestModels': (dict, False),
283 'RequestParameters': (dict, False),
284 'RouteKey': (basestring, True),
285 'RouteResponseSelectionExpression': (basestring, False),
286 'Target': (basestring, False),
287 }
288
289
290 class RouteResponse(AWSObject):
291 resource_type = "AWS::ApiGatewayV2::RouteResponse"
292
293 props = {
294 'ApiId': (basestring, True),
295 'ModelSelectionExpression': (basestring, False),
296 'ResponseModels': (dict, False),
297 'ResponseParameters': (dict, False),
298 'RouteId': (basestring, True),
299 'RouteResponseKey': (basestring, True),
300 }
301
302
303 class AccessLogSettings(AWSProperty):
304 props = {
305 'DestinationArn': (basestring, False),
306 'Format': (basestring, False),
307 }
308
309
310 class RouteSettings(AWSProperty):
311 props = {
312 'DataTraceEnabled': (boolean, False),
313 'DetailedMetricsEnabled': (boolean, False),
314 'LoggingLevel': (validate_logging_level, False),
315 'ThrottlingBurstLimit': (integer, False),
316 'ThrottlingRateLimit': (double, False),
317 }
318
319
320 class Stage(AWSObject):
321 resource_type = "AWS::ApiGatewayV2::Stage"
322
323 props = {
324 'AccessLogSettings': (AccessLogSettings, False),
325 'ApiId': (basestring, True),
326 'AutoDeploy': (boolean, False),
327 'ClientCertificateId': (basestring, False),
328 'DefaultRouteSettings': (RouteSettings, False),
329 'DeploymentId': (basestring, False),
330 'Description': (basestring, False),
331 'RouteSettings': (dict, False),
332 'StageName': (basestring, True),
333 'StageVariables': (dict, False),
334 'Tags': (dict, False),
335 }
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/apigatewayv2.py b/troposphere/apigatewayv2.py
--- a/troposphere/apigatewayv2.py
+++ b/troposphere/apigatewayv2.py
@@ -333,3 +333,14 @@
'StageVariables': (dict, False),
'Tags': (dict, False),
}
+
+
+class VpcLink(AWSObject):
+ resource_type = "AWS::ApiGatewayV2::VpcLink"
+
+ props = {
+ 'Name': (basestring, True),
+ 'SecurityGroupIds': ([basestring], False),
+ 'SubnetIds': ([basestring], True),
+ 'Tags': (dict, False),
+ }
| {"golden_diff": "diff --git a/troposphere/apigatewayv2.py b/troposphere/apigatewayv2.py\n--- a/troposphere/apigatewayv2.py\n+++ b/troposphere/apigatewayv2.py\n@@ -333,3 +333,14 @@\n 'StageVariables': (dict, False),\n 'Tags': (dict, False),\n }\n+\n+\n+class VpcLink(AWSObject):\n+ resource_type = \"AWS::ApiGatewayV2::VpcLink\"\n+\n+ props = {\n+ 'Name': (basestring, True),\n+ 'SecurityGroupIds': ([basestring], False),\n+ 'SubnetIds': ([basestring], True),\n+ 'Tags': (dict, False),\n+ }\n", "issue": "ApiGatewayV2 is missing VpcLink\nWe appear to be unable to create an HTTP Vpc Link, as the ApiGatewayV2 Vpc Link is not yet implemented.\r\n\r\nFrom the AWS documentation:\r\n\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-vpclink.html\r\n\r\nFrom the Troposphere codebase, VpcLink is missing.\r\n\r\nhttps://github.com/cloudtools/troposphere/blob/master/troposphere/apigatewayv2.py\r\n\r\nPlease note that this is not the same thing as the ApiGateway VpcLink, as that does not support Http Vpc Links.\n", "before_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n#\n# *** Do not modify - this file is autogenerated ***\n# Resource specification version: 10.0.0\n\n\nfrom . import AWSObject\nfrom . import AWSProperty\nfrom .validators import boolean\nfrom .validators import integer\nfrom .validators import integer_range\nfrom .validators import positive_integer\nfrom .validators import double\nfrom .validators import json_checker\n\n\ndef validate_integration_type(integration_type):\n\n valid_integration_types = [\n \"AWS\",\n \"AWS_PROXY\",\n \"HTTP\",\n \"HTTP_PROXY\",\n \"MOCK\"\n ]\n if integration_type not in valid_integration_types:\n raise ValueError(\n \"{} is not a valid IntegrationType\".format(\n integration_type)\n )\n return integration_type\n\n\ndef validate_authorizer_type(authorizer_type):\n\n valid_authorizer_types = [\n \"REQUEST\"\n ]\n if authorizer_type not in valid_authorizer_types:\n raise ValueError(\n \"{} is not a valid AuthorizerType\".format(\n authorizer_type)\n )\n return authorizer_type\n\n\ndef validate_logging_level(logging_level):\n\n valid_logging_levels = [\n \"WARN\",\n \"INFO\",\n \"DEBUG\"\n ]\n if logging_level not in valid_logging_levels:\n raise ValueError(\n \"{} is not a valid LoggingLevel\".format(\n logging_level)\n )\n return logging_level\n\n\ndef validate_passthrough_behavior(passthrough_behavior):\n\n valid_passthrough_behaviors = [\n \"WHEN_NO_MATCH\",\n \"WHEN_NO_TEMPLATES\",\n \"NEVER\"\n ]\n if passthrough_behavior not in valid_passthrough_behaviors:\n raise ValueError(\n \"{} is not a valid PassthroughBehavior\".format(\n passthrough_behavior)\n )\n return passthrough_behavior\n\n\ndef validate_content_handling_strategy(content_handling_strategy):\n\n valid_handling_strategy_values = [\n \"CONVERT_TO_TEXT\",\n \"CONVERT_TO_BINARY\"\n ]\n if content_handling_strategy not in valid_handling_strategy_values:\n raise ValueError(\n \"{} is not a valid ContentHandlingStrategy\".format(\n content_handling_strategy)\n )\n return content_handling_strategy\n\n\ndef validate_authorizer_ttl(ttl_value):\n \"\"\" Validate authorizer ttl timeout\n :param ttl_value: The TTL timeout in seconds\n :return: The provided TTL value if valid\n \"\"\"\n ttl_value = int(positive_integer(ttl_value))\n if ttl_value > 3600:\n raise ValueError(\"The AuthorizerResultTtlInSeconds should be <= 3600\")\n return ttl_value\n\n\nclass 
BodyS3Location(AWSProperty):\n props = {\n 'Bucket': (basestring, False),\n 'Etag': (basestring, False),\n 'Key': (basestring, False),\n 'Version': (basestring, False),\n }\n\n\nclass Cors(AWSProperty):\n props = {\n 'AllowCredentials': (boolean, False),\n 'AllowHeaders': ([basestring], False),\n 'AllowMethods': ([basestring], False),\n 'AllowOrigins': ([basestring], False),\n 'ExposeHeaders': ([basestring], False),\n 'MaxAge': (integer, False),\n }\n\n\nclass Api(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Api\"\n\n props = {\n 'ApiKeySelectionExpression': (basestring, False),\n 'BasePath': (basestring, False),\n 'Body': (dict, False),\n 'BodyS3Location': (BodyS3Location, False),\n 'CorsConfiguration': (Cors, False),\n 'CredentialsArn': (basestring, False),\n 'Description': (basestring, False),\n 'DisableSchemaValidation': (boolean, False),\n 'FailOnWarnings': (boolean, False),\n 'Name': (basestring, False),\n 'ProtocolType': (basestring, False),\n 'RouteKey': (basestring, False),\n 'RouteSelectionExpression': (basestring, False),\n 'Tags': (dict, False),\n 'Target': (basestring, False),\n 'Version': (basestring, False),\n }\n\n\nclass ApiMapping(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::ApiMapping\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ApiMappingKey': (basestring, False),\n 'DomainName': (basestring, True),\n 'Stage': (basestring, True),\n }\n\n\nclass JWTConfiguration(AWSProperty):\n props = {\n 'Audience': ([basestring], False),\n 'Issuer': (basestring, False),\n }\n\n\nclass Authorizer(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Authorizer\"\n\n props = {\n 'ApiId': (basestring, True),\n 'AuthorizerCredentialsArn': (basestring, False),\n 'AuthorizerResultTtlInSeconds': (validate_authorizer_ttl, False),\n 'AuthorizerType': (validate_authorizer_type, True),\n 'AuthorizerUri': (basestring, False),\n 'IdentitySource': ([basestring], True),\n 'IdentityValidationExpression': (basestring, False),\n 'JwtConfiguration': (JWTConfiguration, False),\n 'Name': (basestring, True),\n }\n\n\nclass Deployment(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Deployment\"\n\n props = {\n 'ApiId': (basestring, True),\n 'Description': (basestring, False),\n 'StageName': (basestring, False),\n }\n\n\nclass DomainNameConfiguration(AWSProperty):\n props = {\n 'CertificateArn': (basestring, False),\n 'CertificateName': (basestring, False),\n 'EndpointType': (basestring, False),\n }\n\n\nclass DomainName(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::DomainName\"\n\n props = {\n 'DomainName': (basestring, True),\n 'DomainNameConfigurations': ([DomainNameConfiguration], False),\n 'Tags': (dict, False),\n }\n\n\nclass TlsConfig(AWSProperty):\n props = {\n 'ServerNameToVerify': (basestring, False),\n }\n\n\nclass Integration(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Integration\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ConnectionType': (basestring, False),\n 'ConnectionId': (basestring, False),\n 'ContentHandlingStrategy': (validate_content_handling_strategy, False),\n 'CredentialsArn': (basestring, False),\n 'Description': (basestring, False),\n 'IntegrationMethod': (basestring, False),\n 'IntegrationType': (validate_integration_type, True),\n 'IntegrationUri': (basestring, False),\n 'PassthroughBehavior': (validate_passthrough_behavior, False),\n 'PayloadFormatVersion': (basestring, False),\n 'RequestParameters': (dict, False),\n 'RequestTemplates': (dict, False),\n 'TemplateSelectionExpression': (basestring, False),\n 'TimeoutInMillis': 
(integer_range(50, 29000), False),\n 'TlsConfig': (TlsConfig, False),\n }\n\n\nclass IntegrationResponse(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::IntegrationResponse\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ContentHandlingStrategy': (validate_content_handling_strategy, False),\n 'IntegrationId': (basestring, True),\n 'IntegrationResponseKey': (basestring, True),\n 'ResponseParameters': (dict, False),\n 'ResponseTemplates': (dict, False),\n 'TemplateSelectionExpression': (basestring, False),\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Model\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ContentType': (basestring, False),\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'Schema': ((basestring, dict), True),\n }\n\n def validate(self):\n name = 'Schema'\n if name in self.properties:\n schema = self.properties.get(name)\n self.properties[name] = json_checker(schema)\n\n\nclass Route(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Route\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ApiKeyRequired': (boolean, False),\n 'AuthorizationScopes': ([basestring], False),\n 'AuthorizationType': (basestring, False),\n 'AuthorizerId': (basestring, False),\n 'ModelSelectionExpression': (basestring, False),\n 'OperationName': (basestring, False),\n 'RequestModels': (dict, False),\n 'RequestParameters': (dict, False),\n 'RouteKey': (basestring, True),\n 'RouteResponseSelectionExpression': (basestring, False),\n 'Target': (basestring, False),\n }\n\n\nclass RouteResponse(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::RouteResponse\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ModelSelectionExpression': (basestring, False),\n 'ResponseModels': (dict, False),\n 'ResponseParameters': (dict, False),\n 'RouteId': (basestring, True),\n 'RouteResponseKey': (basestring, True),\n }\n\n\nclass AccessLogSettings(AWSProperty):\n props = {\n 'DestinationArn': (basestring, False),\n 'Format': (basestring, False),\n }\n\n\nclass RouteSettings(AWSProperty):\n props = {\n 'DataTraceEnabled': (boolean, False),\n 'DetailedMetricsEnabled': (boolean, False),\n 'LoggingLevel': (validate_logging_level, False),\n 'ThrottlingBurstLimit': (integer, False),\n 'ThrottlingRateLimit': (double, False),\n }\n\n\nclass Stage(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Stage\"\n\n props = {\n 'AccessLogSettings': (AccessLogSettings, False),\n 'ApiId': (basestring, True),\n 'AutoDeploy': (boolean, False),\n 'ClientCertificateId': (basestring, False),\n 'DefaultRouteSettings': (RouteSettings, False),\n 'DeploymentId': (basestring, False),\n 'Description': (basestring, False),\n 'RouteSettings': (dict, False),\n 'StageName': (basestring, True),\n 'StageVariables': (dict, False),\n 'Tags': (dict, False),\n }\n", "path": "troposphere/apigatewayv2.py"}], "after_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n#\n# *** Do not modify - this file is autogenerated ***\n# Resource specification version: 10.0.0\n\n\nfrom . import AWSObject\nfrom . 
import AWSProperty\nfrom .validators import boolean\nfrom .validators import integer\nfrom .validators import integer_range\nfrom .validators import positive_integer\nfrom .validators import double\nfrom .validators import json_checker\n\n\ndef validate_integration_type(integration_type):\n\n valid_integration_types = [\n \"AWS\",\n \"AWS_PROXY\",\n \"HTTP\",\n \"HTTP_PROXY\",\n \"MOCK\"\n ]\n if integration_type not in valid_integration_types:\n raise ValueError(\n \"{} is not a valid IntegrationType\".format(\n integration_type)\n )\n return integration_type\n\n\ndef validate_authorizer_type(authorizer_type):\n\n valid_authorizer_types = [\n \"REQUEST\"\n ]\n if authorizer_type not in valid_authorizer_types:\n raise ValueError(\n \"{} is not a valid AuthorizerType\".format(\n authorizer_type)\n )\n return authorizer_type\n\n\ndef validate_logging_level(logging_level):\n\n valid_logging_levels = [\n \"WARN\",\n \"INFO\",\n \"DEBUG\"\n ]\n if logging_level not in valid_logging_levels:\n raise ValueError(\n \"{} is not a valid LoggingLevel\".format(\n logging_level)\n )\n return logging_level\n\n\ndef validate_passthrough_behavior(passthrough_behavior):\n\n valid_passthrough_behaviors = [\n \"WHEN_NO_MATCH\",\n \"WHEN_NO_TEMPLATES\",\n \"NEVER\"\n ]\n if passthrough_behavior not in valid_passthrough_behaviors:\n raise ValueError(\n \"{} is not a valid PassthroughBehavior\".format(\n passthrough_behavior)\n )\n return passthrough_behavior\n\n\ndef validate_content_handling_strategy(content_handling_strategy):\n\n valid_handling_strategy_values = [\n \"CONVERT_TO_TEXT\",\n \"CONVERT_TO_BINARY\"\n ]\n if content_handling_strategy not in valid_handling_strategy_values:\n raise ValueError(\n \"{} is not a valid ContentHandlingStrategy\".format(\n content_handling_strategy)\n )\n return content_handling_strategy\n\n\ndef validate_authorizer_ttl(ttl_value):\n \"\"\" Validate authorizer ttl timeout\n :param ttl_value: The TTL timeout in seconds\n :return: The provided TTL value if valid\n \"\"\"\n ttl_value = int(positive_integer(ttl_value))\n if ttl_value > 3600:\n raise ValueError(\"The AuthorizerResultTtlInSeconds should be <= 3600\")\n return ttl_value\n\n\nclass BodyS3Location(AWSProperty):\n props = {\n 'Bucket': (basestring, False),\n 'Etag': (basestring, False),\n 'Key': (basestring, False),\n 'Version': (basestring, False),\n }\n\n\nclass Cors(AWSProperty):\n props = {\n 'AllowCredentials': (boolean, False),\n 'AllowHeaders': ([basestring], False),\n 'AllowMethods': ([basestring], False),\n 'AllowOrigins': ([basestring], False),\n 'ExposeHeaders': ([basestring], False),\n 'MaxAge': (integer, False),\n }\n\n\nclass Api(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Api\"\n\n props = {\n 'ApiKeySelectionExpression': (basestring, False),\n 'BasePath': (basestring, False),\n 'Body': (dict, False),\n 'BodyS3Location': (BodyS3Location, False),\n 'CorsConfiguration': (Cors, False),\n 'CredentialsArn': (basestring, False),\n 'Description': (basestring, False),\n 'DisableSchemaValidation': (boolean, False),\n 'FailOnWarnings': (boolean, False),\n 'Name': (basestring, False),\n 'ProtocolType': (basestring, False),\n 'RouteKey': (basestring, False),\n 'RouteSelectionExpression': (basestring, False),\n 'Tags': (dict, False),\n 'Target': (basestring, False),\n 'Version': (basestring, False),\n }\n\n\nclass ApiMapping(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::ApiMapping\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ApiMappingKey': (basestring, False),\n 'DomainName': (basestring, 
True),\n 'Stage': (basestring, True),\n }\n\n\nclass JWTConfiguration(AWSProperty):\n props = {\n 'Audience': ([basestring], False),\n 'Issuer': (basestring, False),\n }\n\n\nclass Authorizer(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Authorizer\"\n\n props = {\n 'ApiId': (basestring, True),\n 'AuthorizerCredentialsArn': (basestring, False),\n 'AuthorizerResultTtlInSeconds': (validate_authorizer_ttl, False),\n 'AuthorizerType': (validate_authorizer_type, True),\n 'AuthorizerUri': (basestring, False),\n 'IdentitySource': ([basestring], True),\n 'IdentityValidationExpression': (basestring, False),\n 'JwtConfiguration': (JWTConfiguration, False),\n 'Name': (basestring, True),\n }\n\n\nclass Deployment(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Deployment\"\n\n props = {\n 'ApiId': (basestring, True),\n 'Description': (basestring, False),\n 'StageName': (basestring, False),\n }\n\n\nclass DomainNameConfiguration(AWSProperty):\n props = {\n 'CertificateArn': (basestring, False),\n 'CertificateName': (basestring, False),\n 'EndpointType': (basestring, False),\n }\n\n\nclass DomainName(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::DomainName\"\n\n props = {\n 'DomainName': (basestring, True),\n 'DomainNameConfigurations': ([DomainNameConfiguration], False),\n 'Tags': (dict, False),\n }\n\n\nclass TlsConfig(AWSProperty):\n props = {\n 'ServerNameToVerify': (basestring, False),\n }\n\n\nclass Integration(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Integration\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ConnectionType': (basestring, False),\n 'ConnectionId': (basestring, False),\n 'ContentHandlingStrategy': (validate_content_handling_strategy, False),\n 'CredentialsArn': (basestring, False),\n 'Description': (basestring, False),\n 'IntegrationMethod': (basestring, False),\n 'IntegrationType': (validate_integration_type, True),\n 'IntegrationUri': (basestring, False),\n 'PassthroughBehavior': (validate_passthrough_behavior, False),\n 'PayloadFormatVersion': (basestring, False),\n 'RequestParameters': (dict, False),\n 'RequestTemplates': (dict, False),\n 'TemplateSelectionExpression': (basestring, False),\n 'TimeoutInMillis': (integer_range(50, 29000), False),\n 'TlsConfig': (TlsConfig, False),\n }\n\n\nclass IntegrationResponse(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::IntegrationResponse\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ContentHandlingStrategy': (validate_content_handling_strategy, False),\n 'IntegrationId': (basestring, True),\n 'IntegrationResponseKey': (basestring, True),\n 'ResponseParameters': (dict, False),\n 'ResponseTemplates': (dict, False),\n 'TemplateSelectionExpression': (basestring, False),\n }\n\n\nclass Model(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Model\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ContentType': (basestring, False),\n 'Description': (basestring, False),\n 'Name': (basestring, True),\n 'Schema': ((basestring, dict), True),\n }\n\n def validate(self):\n name = 'Schema'\n if name in self.properties:\n schema = self.properties.get(name)\n self.properties[name] = json_checker(schema)\n\n\nclass Route(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Route\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ApiKeyRequired': (boolean, False),\n 'AuthorizationScopes': ([basestring], False),\n 'AuthorizationType': (basestring, False),\n 'AuthorizerId': (basestring, False),\n 'ModelSelectionExpression': (basestring, False),\n 'OperationName': (basestring, False),\n 'RequestModels': (dict, False),\n 
'RequestParameters': (dict, False),\n 'RouteKey': (basestring, True),\n 'RouteResponseSelectionExpression': (basestring, False),\n 'Target': (basestring, False),\n }\n\n\nclass RouteResponse(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::RouteResponse\"\n\n props = {\n 'ApiId': (basestring, True),\n 'ModelSelectionExpression': (basestring, False),\n 'ResponseModels': (dict, False),\n 'ResponseParameters': (dict, False),\n 'RouteId': (basestring, True),\n 'RouteResponseKey': (basestring, True),\n }\n\n\nclass AccessLogSettings(AWSProperty):\n props = {\n 'DestinationArn': (basestring, False),\n 'Format': (basestring, False),\n }\n\n\nclass RouteSettings(AWSProperty):\n props = {\n 'DataTraceEnabled': (boolean, False),\n 'DetailedMetricsEnabled': (boolean, False),\n 'LoggingLevel': (validate_logging_level, False),\n 'ThrottlingBurstLimit': (integer, False),\n 'ThrottlingRateLimit': (double, False),\n }\n\n\nclass Stage(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::Stage\"\n\n props = {\n 'AccessLogSettings': (AccessLogSettings, False),\n 'ApiId': (basestring, True),\n 'AutoDeploy': (boolean, False),\n 'ClientCertificateId': (basestring, False),\n 'DefaultRouteSettings': (RouteSettings, False),\n 'DeploymentId': (basestring, False),\n 'Description': (basestring, False),\n 'RouteSettings': (dict, False),\n 'StageName': (basestring, True),\n 'StageVariables': (dict, False),\n 'Tags': (dict, False),\n }\n\n\nclass VpcLink(AWSObject):\n resource_type = \"AWS::ApiGatewayV2::VpcLink\"\n\n props = {\n 'Name': (basestring, True),\n 'SecurityGroupIds': ([basestring], False),\n 'SubnetIds': ([basestring], True),\n 'Tags': (dict, False),\n }\n", "path": "troposphere/apigatewayv2.py"}]} | 3,641 | 168 |
gh_patches_debug_2081 | rasdani/github-patches | git_diff | PaddlePaddle__models-2261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deeplabv3+ repeatedly reports a warning
Under paddle 1.4.1, because deeplabv3+ enables GPU memory optimization, the following warning is reported over and over:
<img width="956" alt="db83046567521a831348d8eea6f2e46a" src="https://user-images.githubusercontent.com/46314656/57190981-398cac80-6f53-11e9-9ffc-3a3c7b379d82.png">
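For readers who cannot view the screenshot: the memory optimization being referred to is controlled by the `memory_optimize` flag of the training script (it defaults to True). For context, the relevant switches, excerpted from the `train.py` listed below, are:

```python
# Excerpt (for context) from PaddleCV/deeplabv3+/train.py: the settings
# enabled when --memory_optimize is left at its default of True.
import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
if args.memory_optimize:          # `args` comes from the argparse setup in train.py
    build_strategy.fuse_relu_depthwise_conv = True
    build_strategy.enable_inplace = True
    build_strategy.memory_optimize = True
```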
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PaddleCV/deeplabv3+/train.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4 import os
5 if 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:
6 os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'
7
8 import paddle
9 import paddle.fluid as fluid
10 import numpy as np
11 import argparse
12 from reader import CityscapeDataset
13 import reader
14 import models
15 import time
16 import contextlib
17 import paddle.fluid.profiler as profiler
18 import utility
19
20 parser = argparse.ArgumentParser()
21 add_arg = lambda *args: utility.add_arguments(*args, argparser=parser)
22
23 # yapf: disable
24 add_arg('batch_size', int, 4, "The number of images in each batch during training.")
25 add_arg('train_crop_size', int, 769, "Image crop size during training.")
26 add_arg('base_lr', float, 0.001, "The base learning rate for model training.")
27 add_arg('total_step', int, 500000, "Number of the training step.")
28 add_arg('init_weights_path', str, None, "Path of the initial weights in paddlepaddle format.")
29 add_arg('save_weights_path', str, None, "Path of the saved weights during training.")
30 add_arg('dataset_path', str, None, "Cityscape dataset path.")
31 add_arg('parallel', bool, True, "using ParallelExecutor.")
32 add_arg('use_gpu', bool, True, "Whether use GPU or CPU.")
33 add_arg('num_classes', int, 19, "Number of classes.")
34 add_arg('load_logit_layer', bool, True, "Load last logit fc layer or not. If you are training with different number of classes, you should set to False.")
35 add_arg('memory_optimize', bool, True, "Using memory optimizer.")
36 add_arg('norm_type', str, 'bn', "Normalization type, should be 'bn' or 'gn'.")
37 add_arg('profile', bool, False, "Enable profiler.")
38 add_arg('use_py_reader', bool, True, "Use py reader.")
39 parser.add_argument(
40 '--enable_ce',
41 action='store_true',
42 help='If set, run the task with continuous evaluation logs. Users can ignore this agument.')
43 #yapf: enable
44
45 @contextlib.contextmanager
46 def profile_context(profile=True):
47 if profile:
48 with profiler.profiler('All', 'total', '/tmp/profile_file2'):
49 yield
50 else:
51 yield
52
53 def load_model():
54 if os.path.isdir(args.init_weights_path):
55 load_vars = [
56 x for x in tp.list_vars()
57 if isinstance(x, fluid.framework.Parameter) and x.name.find('logit') ==
58 -1
59 ]
60 if args.load_logit_layer:
61 fluid.io.load_params(
62 exe, dirname=args.init_weights_path, main_program=tp)
63 else:
64 fluid.io.load_vars(exe, dirname=args.init_weights_path, vars=load_vars)
65 else:
66 fluid.io.load_params(
67 exe,
68 dirname="",
69 filename=args.init_weights_path,
70 main_program=tp)
71
72
73
74 def save_model():
75 assert not os.path.isfile(args.save_weights_path)
76 fluid.io.save_params(
77 exe, dirname=args.save_weights_path, main_program=tp)
78
79
80 def loss(logit, label):
81 label_nignore = fluid.layers.less_than(
82 label.astype('float32'),
83 fluid.layers.assign(np.array([num_classes], 'float32')),
84 force_cpu=False).astype('float32')
85 logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
86 logit = fluid.layers.reshape(logit, [-1, num_classes])
87 label = fluid.layers.reshape(label, [-1, 1])
88 label = fluid.layers.cast(label, 'int64')
89 label_nignore = fluid.layers.reshape(label_nignore, [-1, 1])
90 logit = fluid.layers.softmax(logit, use_cudnn=False)
91 loss = fluid.layers.cross_entropy(logit, label, ignore_index=255)
92 label_nignore.stop_gradient = True
93 label.stop_gradient = True
94 return loss, label_nignore
95
96
97 args = parser.parse_args()
98 utility.print_arguments(args)
99
100 models.clean()
101 models.bn_momentum = 0.9997
102 models.dropout_keep_prop = 0.9
103 models.label_number = args.num_classes
104 models.default_norm_type = args.norm_type
105 deeplabv3p = models.deeplabv3p
106
107 sp = fluid.Program()
108 tp = fluid.Program()
109
110 # only for ce
111 if args.enable_ce:
112 SEED = 102
113 sp.random_seed = SEED
114 tp.random_seed = SEED
115
116 crop_size = args.train_crop_size
117 batch_size = args.batch_size
118 image_shape = [crop_size, crop_size]
119 reader.default_config['crop_size'] = crop_size
120 reader.default_config['shuffle'] = True
121 num_classes = args.num_classes
122 weight_decay = 0.00004
123
124 base_lr = args.base_lr
125 total_step = args.total_step
126
127 with fluid.program_guard(tp, sp):
128 if args.use_py_reader:
129 batch_size_each = batch_size // fluid.core.get_cuda_device_count()
130 py_reader = fluid.layers.py_reader(capacity=64,
131 shapes=[[batch_size_each, 3] + image_shape, [batch_size_each] + image_shape],
132 dtypes=['float32', 'int32'])
133 img, label = fluid.layers.read_file(py_reader)
134 else:
135 img = fluid.layers.data(
136 name='img', shape=[3] + image_shape, dtype='float32')
137 label = fluid.layers.data(name='label', shape=image_shape, dtype='int32')
138 logit = deeplabv3p(img)
139 pred = fluid.layers.argmax(logit, axis=1).astype('int32')
140 loss, mask = loss(logit, label)
141 lr = fluid.layers.polynomial_decay(
142 base_lr, total_step, end_learning_rate=0, power=0.9)
143 area = fluid.layers.elementwise_max(
144 fluid.layers.reduce_mean(mask),
145 fluid.layers.assign(np.array(
146 [0.1], dtype=np.float32)))
147 loss_mean = fluid.layers.reduce_mean(loss) / area
148
149 opt = fluid.optimizer.Momentum(
150 lr,
151 momentum=0.9,
152 regularization=fluid.regularizer.L2DecayRegularizer(
153 regularization_coeff=weight_decay))
154 optimize_ops, params_grads = opt.minimize(loss_mean, startup_program=sp)
155 # ir memory optimizer has some issues, we need to seed grad persistable to
156 # avoid this issue
157 for p,g in params_grads: g.persistable = True
158
159
160 exec_strategy = fluid.ExecutionStrategy()
161 exec_strategy.num_threads = fluid.core.get_cuda_device_count()
162 exec_strategy.num_iteration_per_drop_scope = 100
163 build_strategy = fluid.BuildStrategy()
164 if args.memory_optimize:
165 build_strategy.fuse_relu_depthwise_conv = True
166 build_strategy.enable_inplace = True
167 build_strategy.memory_optimize = True
168
169 place = fluid.CPUPlace()
170 if args.use_gpu:
171 place = fluid.CUDAPlace(0)
172 exe = fluid.Executor(place)
173 exe.run(sp)
174
175 if args.init_weights_path:
176 print("load from:", args.init_weights_path)
177 load_model()
178
179 dataset = reader.CityscapeDataset(args.dataset_path, 'train')
180
181 if args.parallel:
182 binary = fluid.compiler.CompiledProgram(tp).with_data_parallel(
183 loss_name=loss_mean.name,
184 build_strategy=build_strategy,
185 exec_strategy=exec_strategy)
186 else:
187 binary = fluid.compiler.CompiledProgram(tp)
188
189 if args.use_py_reader:
190 assert(batch_size % fluid.core.get_cuda_device_count() == 0)
191 def data_gen():
192 batches = dataset.get_batch_generator(
193 batch_size // fluid.core.get_cuda_device_count(),
194 total_step * fluid.core.get_cuda_device_count())
195 for b in batches:
196 yield b[1], b[2]
197 py_reader.decorate_tensor_provider(data_gen)
198 py_reader.start()
199 else:
200 batches = dataset.get_batch_generator(batch_size, total_step)
201 total_time = 0.0
202 epoch_idx = 0
203 train_loss = 0
204
205 with profile_context(args.profile):
206 for i in range(total_step):
207 epoch_idx += 1
208 begin_time = time.time()
209 prev_start_time = time.time()
210 if not args.use_py_reader:
211 _, imgs, labels, names = next(batches)
212 train_loss, = exe.run(binary,
213 feed={'img': imgs,
214 'label': labels}, fetch_list=[loss_mean])
215 else:
216 train_loss, = exe.run(binary, fetch_list=[loss_mean])
217 train_loss = np.mean(train_loss)
218 end_time = time.time()
219 total_time += end_time - begin_time
220 if i % 100 == 0:
221 print("Model is saved to", args.save_weights_path)
222 save_model()
223 print("step {:d}, loss: {:.6f}, step_time_cost: {:.3f}".format(
224 i, train_loss, end_time - prev_start_time))
225
226 print("Training done. Model is saved to", args.save_weights_path)
227 save_model()
228
229 if args.enable_ce:
230 gpu_num = fluid.core.get_cuda_device_count()
231 print("kpis\teach_pass_duration_card%s\t%s" %
232 (gpu_num, total_time / epoch_idx))
233 print("kpis\ttrain_loss_card%s\t%s" % (gpu_num, train_loss))
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PaddleCV/deeplabv3+/train.py b/PaddleCV/deeplabv3+/train.py
--- a/PaddleCV/deeplabv3+/train.py
+++ b/PaddleCV/deeplabv3+/train.py
@@ -145,6 +145,7 @@
fluid.layers.assign(np.array(
[0.1], dtype=np.float32)))
loss_mean = fluid.layers.reduce_mean(loss) / area
+ loss_mean.persistable = True
opt = fluid.optimizer.Momentum(
lr,
| {"golden_diff": "diff --git a/PaddleCV/deeplabv3+/train.py b/PaddleCV/deeplabv3+/train.py\n--- a/PaddleCV/deeplabv3+/train.py\n+++ b/PaddleCV/deeplabv3+/train.py\n@@ -145,6 +145,7 @@\n fluid.layers.assign(np.array(\n [0.1], dtype=np.float32)))\n loss_mean = fluid.layers.reduce_mean(loss) / area\n+ loss_mean.persistable = True\n \n opt = fluid.optimizer.Momentum(\n lr,\n", "issue": "deeplabv3+\u53cd\u590d\u62a5warning\n\u5728paddle1.4.1\u4e0b\uff0c\u7531\u4e8edeeplabv3+\u6253\u5f00\u4e86\u663e\u5b58\u4f18\u5316\uff0c\u5bfc\u81f4\u53cd\u590d\u62a5\u5982\u4e0b\u7684warning\uff1a\r\n<img width=\"956\" alt=\"db83046567521a831348d8eea6f2e46a\" src=\"https://user-images.githubusercontent.com/46314656/57190981-398cac80-6f53-11e9-9ffc-3a3c7b379d82.png\">\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport time\nimport contextlib\nimport paddle.fluid.profiler as profiler\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('batch_size', int, 4, \"The number of images in each batch during training.\")\nadd_arg('train_crop_size', int, 769, \"Image crop size during training.\")\nadd_arg('base_lr', float, 0.001, \"The base learning rate for model training.\")\nadd_arg('total_step', int, 500000, \"Number of the training step.\")\nadd_arg('init_weights_path', str, None, \"Path of the initial weights in paddlepaddle format.\")\nadd_arg('save_weights_path', str, None, \"Path of the saved weights during training.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('parallel', bool, True, \"using ParallelExecutor.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('load_logit_layer', bool, True, \"Load last logit fc layer or not. If you are training with different number of classes, you should set to False.\")\nadd_arg('memory_optimize', bool, True, \"Using memory optimizer.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\nadd_arg('profile', bool, False, \"Enable profiler.\")\nadd_arg('use_py_reader', bool, True, \"Use py reader.\")\nparser.add_argument(\n '--enable_ce',\n action='store_true',\n help='If set, run the task with continuous evaluation logs. 
Users can ignore this agument.')\n#yapf: enable\n\[email protected]\ndef profile_context(profile=True):\n if profile:\n with profiler.profiler('All', 'total', '/tmp/profile_file2'):\n yield\n else:\n yield\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n load_vars = [\n x for x in tp.list_vars()\n if isinstance(x, fluid.framework.Parameter) and x.name.find('logit') ==\n -1\n ]\n if args.load_logit_layer:\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_vars(exe, dirname=args.init_weights_path, vars=load_vars)\n else:\n fluid.io.load_params(\n exe,\n dirname=\"\",\n filename=args.init_weights_path,\n main_program=tp)\n\n\n\ndef save_model():\n assert not os.path.isfile(args.save_weights_path)\n fluid.io.save_params(\n exe, dirname=args.save_weights_path, main_program=tp)\n\n\ndef loss(logit, label):\n label_nignore = fluid.layers.less_than(\n label.astype('float32'),\n fluid.layers.assign(np.array([num_classes], 'float32')),\n force_cpu=False).astype('float32')\n logit = fluid.layers.transpose(logit, [0, 2, 3, 1])\n logit = fluid.layers.reshape(logit, [-1, num_classes])\n label = fluid.layers.reshape(label, [-1, 1])\n label = fluid.layers.cast(label, 'int64')\n label_nignore = fluid.layers.reshape(label_nignore, [-1, 1])\n logit = fluid.layers.softmax(logit, use_cudnn=False)\n loss = fluid.layers.cross_entropy(logit, label, ignore_index=255)\n label_nignore.stop_gradient = True\n label.stop_gradient = True\n return loss, label_nignore\n\n\nargs = parser.parse_args()\nutility.print_arguments(args)\n\nmodels.clean()\nmodels.bn_momentum = 0.9997\nmodels.dropout_keep_prop = 0.9\nmodels.label_number = args.num_classes\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nsp = fluid.Program()\ntp = fluid.Program()\n\n# only for ce\nif args.enable_ce:\n SEED = 102\n sp.random_seed = SEED\n tp.random_seed = SEED\n\ncrop_size = args.train_crop_size\nbatch_size = args.batch_size\nimage_shape = [crop_size, crop_size]\nreader.default_config['crop_size'] = crop_size\nreader.default_config['shuffle'] = True\nnum_classes = args.num_classes\nweight_decay = 0.00004\n\nbase_lr = args.base_lr\ntotal_step = args.total_step\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n batch_size_each = batch_size // fluid.core.get_cuda_device_count()\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[batch_size_each, 3] + image_shape, [batch_size_each] + image_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(\n name='img', shape=[3] + image_shape, dtype='float32')\n label = fluid.layers.data(name='label', shape=image_shape, dtype='int32')\n logit = deeplabv3p(img)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n loss, mask = loss(logit, label)\n lr = fluid.layers.polynomial_decay(\n base_lr, total_step, end_learning_rate=0, power=0.9)\n area = fluid.layers.elementwise_max(\n fluid.layers.reduce_mean(mask),\n fluid.layers.assign(np.array(\n [0.1], dtype=np.float32)))\n loss_mean = fluid.layers.reduce_mean(loss) / area\n\n opt = fluid.optimizer.Momentum(\n lr,\n momentum=0.9,\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=weight_decay))\n optimize_ops, params_grads = opt.minimize(loss_mean, startup_program=sp)\n # ir memory optimizer has some issues, we need to seed grad persistable to\n # avoid this issue\n for p,g in params_grads: g.persistable = True\n\n\nexec_strategy = 
fluid.ExecutionStrategy()\nexec_strategy.num_threads = fluid.core.get_cuda_device_count()\nexec_strategy.num_iteration_per_drop_scope = 100\nbuild_strategy = fluid.BuildStrategy()\nif args.memory_optimize:\n build_strategy.fuse_relu_depthwise_conv = True\n build_strategy.enable_inplace = True\n build_strategy.memory_optimize = True\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = reader.CityscapeDataset(args.dataset_path, 'train')\n\nif args.parallel:\n binary = fluid.compiler.CompiledProgram(tp).with_data_parallel(\n loss_name=loss_mean.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\nelse:\n binary = fluid.compiler.CompiledProgram(tp)\n\nif args.use_py_reader:\n assert(batch_size % fluid.core.get_cuda_device_count() == 0)\n def data_gen():\n batches = dataset.get_batch_generator(\n batch_size // fluid.core.get_cuda_device_count(),\n total_step * fluid.core.get_cuda_device_count())\n for b in batches:\n yield b[1], b[2]\n py_reader.decorate_tensor_provider(data_gen)\n py_reader.start()\nelse:\n batches = dataset.get_batch_generator(batch_size, total_step)\ntotal_time = 0.0\nepoch_idx = 0\ntrain_loss = 0\n\nwith profile_context(args.profile):\n for i in range(total_step):\n epoch_idx += 1\n begin_time = time.time()\n prev_start_time = time.time()\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n train_loss, = exe.run(binary,\n feed={'img': imgs,\n 'label': labels}, fetch_list=[loss_mean])\n else:\n train_loss, = exe.run(binary, fetch_list=[loss_mean])\n train_loss = np.mean(train_loss)\n end_time = time.time()\n total_time += end_time - begin_time\n if i % 100 == 0:\n print(\"Model is saved to\", args.save_weights_path)\n save_model()\n print(\"step {:d}, loss: {:.6f}, step_time_cost: {:.3f}\".format(\n i, train_loss, end_time - prev_start_time))\n\nprint(\"Training done. 
Model is saved to\", args.save_weights_path)\nsave_model()\n\nif args.enable_ce:\n gpu_num = fluid.core.get_cuda_device_count()\n print(\"kpis\\teach_pass_duration_card%s\\t%s\" %\n (gpu_num, total_time / epoch_idx))\n print(\"kpis\\ttrain_loss_card%s\\t%s\" % (gpu_num, train_loss))\n", "path": "PaddleCV/deeplabv3+/train.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nif 'FLAGS_fraction_of_gpu_memory_to_use' not in os.environ:\n os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98'\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\nimport argparse\nfrom reader import CityscapeDataset\nimport reader\nimport models\nimport time\nimport contextlib\nimport paddle.fluid.profiler as profiler\nimport utility\n\nparser = argparse.ArgumentParser()\nadd_arg = lambda *args: utility.add_arguments(*args, argparser=parser)\n\n# yapf: disable\nadd_arg('batch_size', int, 4, \"The number of images in each batch during training.\")\nadd_arg('train_crop_size', int, 769, \"Image crop size during training.\")\nadd_arg('base_lr', float, 0.001, \"The base learning rate for model training.\")\nadd_arg('total_step', int, 500000, \"Number of the training step.\")\nadd_arg('init_weights_path', str, None, \"Path of the initial weights in paddlepaddle format.\")\nadd_arg('save_weights_path', str, None, \"Path of the saved weights during training.\")\nadd_arg('dataset_path', str, None, \"Cityscape dataset path.\")\nadd_arg('parallel', bool, True, \"using ParallelExecutor.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU or CPU.\")\nadd_arg('num_classes', int, 19, \"Number of classes.\")\nadd_arg('load_logit_layer', bool, True, \"Load last logit fc layer or not. If you are training with different number of classes, you should set to False.\")\nadd_arg('memory_optimize', bool, True, \"Using memory optimizer.\")\nadd_arg('norm_type', str, 'bn', \"Normalization type, should be 'bn' or 'gn'.\")\nadd_arg('profile', bool, False, \"Enable profiler.\")\nadd_arg('use_py_reader', bool, True, \"Use py reader.\")\nparser.add_argument(\n '--enable_ce',\n action='store_true',\n help='If set, run the task with continuous evaluation logs. 
Users can ignore this agument.')\n#yapf: enable\n\[email protected]\ndef profile_context(profile=True):\n if profile:\n with profiler.profiler('All', 'total', '/tmp/profile_file2'):\n yield\n else:\n yield\n\ndef load_model():\n if os.path.isdir(args.init_weights_path):\n load_vars = [\n x for x in tp.list_vars()\n if isinstance(x, fluid.framework.Parameter) and x.name.find('logit') ==\n -1\n ]\n if args.load_logit_layer:\n fluid.io.load_params(\n exe, dirname=args.init_weights_path, main_program=tp)\n else:\n fluid.io.load_vars(exe, dirname=args.init_weights_path, vars=load_vars)\n else:\n fluid.io.load_params(\n exe,\n dirname=\"\",\n filename=args.init_weights_path,\n main_program=tp)\n\n\n\ndef save_model():\n assert not os.path.isfile(args.save_weights_path)\n fluid.io.save_params(\n exe, dirname=args.save_weights_path, main_program=tp)\n\n\ndef loss(logit, label):\n label_nignore = fluid.layers.less_than(\n label.astype('float32'),\n fluid.layers.assign(np.array([num_classes], 'float32')),\n force_cpu=False).astype('float32')\n logit = fluid.layers.transpose(logit, [0, 2, 3, 1])\n logit = fluid.layers.reshape(logit, [-1, num_classes])\n label = fluid.layers.reshape(label, [-1, 1])\n label = fluid.layers.cast(label, 'int64')\n label_nignore = fluid.layers.reshape(label_nignore, [-1, 1])\n logit = fluid.layers.softmax(logit, use_cudnn=False)\n loss = fluid.layers.cross_entropy(logit, label, ignore_index=255)\n label_nignore.stop_gradient = True\n label.stop_gradient = True\n return loss, label_nignore\n\n\nargs = parser.parse_args()\nutility.print_arguments(args)\n\nmodels.clean()\nmodels.bn_momentum = 0.9997\nmodels.dropout_keep_prop = 0.9\nmodels.label_number = args.num_classes\nmodels.default_norm_type = args.norm_type\ndeeplabv3p = models.deeplabv3p\n\nsp = fluid.Program()\ntp = fluid.Program()\n\n# only for ce\nif args.enable_ce:\n SEED = 102\n sp.random_seed = SEED\n tp.random_seed = SEED\n\ncrop_size = args.train_crop_size\nbatch_size = args.batch_size\nimage_shape = [crop_size, crop_size]\nreader.default_config['crop_size'] = crop_size\nreader.default_config['shuffle'] = True\nnum_classes = args.num_classes\nweight_decay = 0.00004\n\nbase_lr = args.base_lr\ntotal_step = args.total_step\n\nwith fluid.program_guard(tp, sp):\n if args.use_py_reader:\n batch_size_each = batch_size // fluid.core.get_cuda_device_count()\n py_reader = fluid.layers.py_reader(capacity=64,\n shapes=[[batch_size_each, 3] + image_shape, [batch_size_each] + image_shape],\n dtypes=['float32', 'int32'])\n img, label = fluid.layers.read_file(py_reader)\n else:\n img = fluid.layers.data(\n name='img', shape=[3] + image_shape, dtype='float32')\n label = fluid.layers.data(name='label', shape=image_shape, dtype='int32')\n logit = deeplabv3p(img)\n pred = fluid.layers.argmax(logit, axis=1).astype('int32')\n loss, mask = loss(logit, label)\n lr = fluid.layers.polynomial_decay(\n base_lr, total_step, end_learning_rate=0, power=0.9)\n area = fluid.layers.elementwise_max(\n fluid.layers.reduce_mean(mask),\n fluid.layers.assign(np.array(\n [0.1], dtype=np.float32)))\n loss_mean = fluid.layers.reduce_mean(loss) / area\n loss_mean.persistable = True\n\n opt = fluid.optimizer.Momentum(\n lr,\n momentum=0.9,\n regularization=fluid.regularizer.L2DecayRegularizer(\n regularization_coeff=weight_decay))\n optimize_ops, params_grads = opt.minimize(loss_mean, startup_program=sp)\n # ir memory optimizer has some issues, we need to seed grad persistable to\n # avoid this issue\n for p,g in params_grads: g.persistable = 
True\n\n\nexec_strategy = fluid.ExecutionStrategy()\nexec_strategy.num_threads = fluid.core.get_cuda_device_count()\nexec_strategy.num_iteration_per_drop_scope = 100\nbuild_strategy = fluid.BuildStrategy()\nif args.memory_optimize:\n build_strategy.fuse_relu_depthwise_conv = True\n build_strategy.enable_inplace = True\n build_strategy.memory_optimize = True\n\nplace = fluid.CPUPlace()\nif args.use_gpu:\n place = fluid.CUDAPlace(0)\nexe = fluid.Executor(place)\nexe.run(sp)\n\nif args.init_weights_path:\n print(\"load from:\", args.init_weights_path)\n load_model()\n\ndataset = reader.CityscapeDataset(args.dataset_path, 'train')\n\nif args.parallel:\n binary = fluid.compiler.CompiledProgram(tp).with_data_parallel(\n loss_name=loss_mean.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\nelse:\n binary = fluid.compiler.CompiledProgram(tp)\n\nif args.use_py_reader:\n assert(batch_size % fluid.core.get_cuda_device_count() == 0)\n def data_gen():\n batches = dataset.get_batch_generator(\n batch_size // fluid.core.get_cuda_device_count(),\n total_step * fluid.core.get_cuda_device_count())\n for b in batches:\n yield b[1], b[2]\n py_reader.decorate_tensor_provider(data_gen)\n py_reader.start()\nelse:\n batches = dataset.get_batch_generator(batch_size, total_step)\ntotal_time = 0.0\nepoch_idx = 0\ntrain_loss = 0\n\nwith profile_context(args.profile):\n for i in range(total_step):\n epoch_idx += 1\n begin_time = time.time()\n prev_start_time = time.time()\n if not args.use_py_reader:\n _, imgs, labels, names = next(batches)\n train_loss, = exe.run(binary,\n feed={'img': imgs,\n 'label': labels}, fetch_list=[loss_mean])\n else:\n train_loss, = exe.run(binary, fetch_list=[loss_mean])\n train_loss = np.mean(train_loss)\n end_time = time.time()\n total_time += end_time - begin_time\n if i % 100 == 0:\n print(\"Model is saved to\", args.save_weights_path)\n save_model()\n print(\"step {:d}, loss: {:.6f}, step_time_cost: {:.3f}\".format(\n i, train_loss, end_time - prev_start_time))\n\nprint(\"Training done. Model is saved to\", args.save_weights_path)\nsave_model()\n\nif args.enable_ce:\n gpu_num = fluid.core.get_cuda_device_count()\n print(\"kpis\\teach_pass_duration_card%s\\t%s\" %\n (gpu_num, total_time / epoch_idx))\n print(\"kpis\\ttrain_loss_card%s\\t%s\" % (gpu_num, train_loss))\n", "path": "PaddleCV/deeplabv3+/train.py"}]} | 3,101 | 126 |
gh_patches_debug_14931 | rasdani/github-patches | git_diff | huggingface__accelerate-1800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cannot import accelerate when torch>=2.0.1 and torch.distributed is disabled
### System Info
```Shell
I can't run `accelerate env` because of an import error.
accelerate: 0.21.0
OS: macOS
python: 3.10.12
numpy: 1.24.2
torch: 2.0.1
```
### Information
- [ ] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [ ] My own task or dataset (give details below)
### Reproduction
1. build `torch >= 2.0.1` with `USE_DISTRIBUTED=0`
2. install `accelerate == 0.21.0`
3. `python -c "import accelerate"`
4. raise `ModuleNotFoundError: No module named 'torch._C._distributed_c10d'; 'torch._C' is not a package`
<details><summary>Traceback (most recent call last):</summary><div>
```
File "<string>", line 1, in <module>
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/__init__.py", line 3, in <module>
from .accelerator import Accelerator
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/accelerator.py", line 35, in <module>
from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/checkpointing.py", line 24, in <module>
from .utils import (
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/utils/__init__.py", line 132, in <module>
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py", line 24, in <module>
import torch.distributed.checkpoint as dist_cp
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py", line 1, in <module>
from .metadata import (
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/checkpoint/metadata.py", line 3, in <module>
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py", line 1, in <module>
from .api import (
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py", line 5, in <module>
from torch.distributed import distributed_c10d
File "/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 16, in <module>
from torch._C._distributed_c10d import (
```
</div></details>
### Expected behavior
This is the line that causes the issue:
https://github.com/huggingface/accelerate/blob/d5894ab4992878c191ed4c9fdb53b35ad2c0e628/src/accelerate/utils/fsdp_utils.py#L23-L24
I think it would be better to decide whether to import `torch.distributed` based on the result of `torch.distributed.is_available()`, in addition to the torch version check.
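For illustration only, a guarded import along the lines suggested above might look like the sketch below; it is a sketch of the idea, not the project's actual fix, and in `fsdp_utils.py` it would be combined with the existing `is_torch_version(">=", FSDP_PYTORCH_VERSION)` check.

```python
import torch
import torch.distributed  # importable even when torch is built with USE_DISTRIBUTED=0

# Only pull in the distributed/FSDP machinery when the running torch build
# actually ships distributed support, not just when the version is new enough.
if torch.distributed.is_available():
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.fsdp.fully_sharded_data_parallel import (
        FullyShardedDataParallel as FSDP,
    )
else:
    dist_cp = None  # callers must handle the non-distributed case
    FSDP = None
```

With a guard like this, `import accelerate` would no longer reach into `torch._C._distributed_c10d` on builds that lack it.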
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/utils/fsdp_utils.py`
Content:
```
1 # Copyright 2023 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 import torch
17
18 from ..logging import get_logger
19 from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
20 from .versions import is_torch_version
21
22
23 if is_torch_version(">=", FSDP_PYTORCH_VERSION):
24 import torch.distributed.checkpoint as dist_cp
25 from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
26 from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
27 from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
28 from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
29
30
31 logger = get_logger(__name__)
32
33
34 def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
35 os.makedirs(output_dir, exist_ok=True)
36
37 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
38 # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
39 # so, only enable it when num_processes>1
40 is_multi_process = accelerator.num_processes > 1
41 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
42 fsdp_plugin.state_dict_config.rank0_only = is_multi_process
43
44 with FSDP.state_dict_type(
45 model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
46 ):
47 state_dict = model.state_dict()
48 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
49 weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
50 output_model_file = os.path.join(output_dir, weights_name)
51 if accelerator.process_index == 0:
52 logger.info(f"Saving model to {output_model_file}")
53 torch.save(state_dict, output_model_file)
54 logger.info(f"Model saved to {output_model_file}")
55 elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
56 weights_name = (
57 f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
58 if model_index == 0
59 else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
60 )
61 output_model_file = os.path.join(output_dir, weights_name)
62 logger.info(f"Saving model to {output_model_file}")
63 torch.save(state_dict, output_model_file)
64 logger.info(f"Model saved to {output_model_file}")
65 elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
66 ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
67 os.makedirs(ckpt_dir, exist_ok=True)
68 logger.info(f"Saving model to {ckpt_dir}")
69 state_dict = {"model": state_dict}
70
71 dist_cp.save_state_dict(
72 state_dict=state_dict,
73 storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
74 planner=DefaultSavePlanner(),
75 )
76 logger.info(f"Model saved to {ckpt_dir}")
77
78
79 def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
80 accelerator.wait_for_everyone()
81 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
82 # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT
83 # so, only enable it when num_processes>1
84 is_multi_process = accelerator.num_processes > 1
85 fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process
86 fsdp_plugin.state_dict_config.rank0_only = is_multi_process
87 with FSDP.state_dict_type(
88 model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
89 ):
90 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
91 if type(model) != FSDP and accelerator.process_index != 0:
92 if not fsdp_plugin.sync_module_states:
93 raise ValueError(
94 "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
95 "initializing FSDP object"
96 )
97 return
98 weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
99 input_model_file = os.path.join(input_dir, weights_name)
100 logger.info(f"Loading model from {input_model_file}")
101 state_dict = torch.load(input_model_file)
102 logger.info(f"Model loaded from {input_model_file}")
103 elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
104 weights_name = (
105 f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
106 if model_index == 0
107 else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
108 )
109 input_model_file = os.path.join(input_dir, weights_name)
110 logger.info(f"Loading model from {input_model_file}")
111 state_dict = torch.load(input_model_file)
112 logger.info(f"Model loaded from {input_model_file}")
113 elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
114 ckpt_dir = (
115 os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
116 if f"{MODEL_NAME}" not in input_dir
117 else input_dir
118 )
119 logger.info(f"Loading model from {ckpt_dir}")
120 state_dict = {"model": model.state_dict()}
121 dist_cp.load_state_dict(
122 state_dict=state_dict,
123 storage_reader=dist_cp.FileSystemReader(ckpt_dir),
124 planner=DefaultLoadPlanner(),
125 )
126 state_dict = state_dict["model"]
127 logger.info(f"Model loaded from {ckpt_dir}")
128 load_result = model.load_state_dict(state_dict)
129 return load_result
130
131
132 def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
133 os.makedirs(output_dir, exist_ok=True)
134 with FSDP.state_dict_type(
135 model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
136 ):
137 optim_state = FSDP.optim_state_dict(model, optimizer)
138 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
139 if accelerator.process_index == 0:
140 optim_state_name = (
141 f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
142 )
143 output_optimizer_file = os.path.join(output_dir, optim_state_name)
144 logger.info(f"Saving Optimizer state to {output_optimizer_file}")
145 torch.save(optim_state, output_optimizer_file)
146 logger.info(f"Optimizer state saved in {output_optimizer_file}")
147 else:
148 ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
149 os.makedirs(ckpt_dir, exist_ok=True)
150 logger.info(f"Saving Optimizer state to {ckpt_dir}")
151 dist_cp.save_state_dict(
152 state_dict={"optimizer": optim_state},
153 storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
154 planner=DefaultSavePlanner(),
155 )
156 logger.info(f"Optimizer state saved in {ckpt_dir}")
157
158
159 def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
160 accelerator.wait_for_everyone()
161 with FSDP.state_dict_type(
162 model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
163 ):
164 if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
165 optim_state = None
166 # below check should work but currently it isn't working (mostly opytorch issue),
167 # in the meantime disabling it at the cost of excess memory usage
168 # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
169 optimizer_name = (
170 f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
171 )
172 input_optimizer_file = os.path.join(input_dir, optimizer_name)
173 logger.info(f"Loading Optimizer state from {input_optimizer_file}")
174 optim_state = torch.load(input_optimizer_file)
175 logger.info(f"Optimizer state loaded from {input_optimizer_file}")
176 else:
177 ckpt_dir = (
178 os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
179 if f"{OPTIMIZER_NAME}" not in input_dir
180 else input_dir
181 )
182 logger.info(f"Loading Optimizer from {ckpt_dir}")
183 optim_state = load_sharded_optimizer_state_dict(
184 model_state_dict=model.state_dict(),
185 optimizer_key="optimizer",
186 storage_reader=dist_cp.FileSystemReader(ckpt_dir),
187 )
188 optim_state = optim_state["optimizer"]
189 logger.info(f"Optimizer loaded from {ckpt_dir}")
190 flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state)
191 optimizer.load_state_dict(flattened_osd)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/accelerate/utils/fsdp_utils.py b/src/accelerate/utils/fsdp_utils.py
--- a/src/accelerate/utils/fsdp_utils.py
+++ b/src/accelerate/utils/fsdp_utils.py
@@ -17,10 +17,11 @@
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
+from .imports import is_torch_distributed_available
from .versions import is_torch_version
-if is_torch_version(">=", FSDP_PYTORCH_VERSION):
+if is_torch_version(">=", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
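The patch above adds a runtime availability check on top of the version check, so the `torch.distributed.checkpoint` imports are only attempted when PyTorch was actually built with distributed support. A minimal standalone sketch of that guard is shown below; it assumes `is_torch_distributed_available()` amounts to `torch.distributed.is_available()`, which is the stock PyTorch probe for builds compiled with `USE_DISTRIBUTED=0` (the helper name and placement here are illustrative, not the accelerate source).

```python
# Sketch only: gate distributed-only imports behind torch.distributed.is_available(),
# so a PyTorch build without c10d bindings can still import the surrounding package.
import torch.distributed as dist


def is_torch_distributed_available() -> bool:
    # torch.distributed itself always imports; is_available() reports whether the
    # distributed (c10d) backend was compiled into this PyTorch build.
    return dist.is_available()


if is_torch_distributed_available():
    # Importing torch.distributed.checkpoint pulls in the compiled c10d bindings,
    # which is what fails on USE_DISTRIBUTED=0 builds according to the reported traceback.
    import torch.distributed.checkpoint as dist_cp
```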
| {"golden_diff": "diff --git a/src/accelerate/utils/fsdp_utils.py b/src/accelerate/utils/fsdp_utils.py\n--- a/src/accelerate/utils/fsdp_utils.py\n+++ b/src/accelerate/utils/fsdp_utils.py\n@@ -17,10 +17,11 @@\n \n from ..logging import get_logger\n from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME\n+from .imports import is_torch_distributed_available\n from .versions import is_torch_version\n \n \n-if is_torch_version(\">=\", FSDP_PYTORCH_VERSION):\n+if is_torch_version(\">=\", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():\n import torch.distributed.checkpoint as dist_cp\n from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner\n from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict\n", "issue": "cannot import accelerate when torch>=2.0.1 and torch.distributed is disabled\n### System Info\r\n\r\n```Shell\r\nI can't run `accelerate env` because of an import error.\r\n\r\naccelerate: 0.21.0\r\nOS: macOS\r\npython: 3.10.12\r\nnumpy: 1.24.2\r\ntorch: 2.0.1\r\n```\r\n\r\n\r\n### Information\r\n\r\n- [ ] The official example scripts\r\n- [ ] My own modified scripts\r\n\r\n### Tasks\r\n\r\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\r\n- [ ] My own task or dataset (give details below)\r\n\r\n### Reproduction\r\n\r\n1. build `torch >= 2.0.1` with `USE_DISTRIBUTED=0`\r\n2. install `accelerate == 0.21.0`\r\n3. `python -c \"import accelerate\"`\r\n4. raise `ModuleNotFoundError: No module named 'torch._C._distributed_c10d'; 'torch._C' is not a package`\r\n\r\n<details><summary>Traceback (most recent call last):</summary><div>\r\n\r\n```\r\n File \"<string>\", line 1, in <module>\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/__init__.py\", line 3, in <module>\r\n from .accelerator import Accelerator\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/accelerator.py\", line 35, in <module>\r\n from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/checkpointing.py\", line 24, in <module>\r\n from .utils import (\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/utils/__init__.py\", line 132, in <module>\r\n from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/accelerate/utils/fsdp_utils.py\", line 24, in <module>\r\n import torch.distributed.checkpoint as dist_cp\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/checkpoint/__init__.py\", line 1, in <module>\r\n from .metadata import (\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/checkpoint/metadata.py\", line 3, in <module>\r\n from torch.distributed._shard.sharded_tensor.metadata import TensorProperties\r\n File 
\"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py\", line 1, in <module>\r\n from .api import (\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/_shard/api.py\", line 5, in <module>\r\n from torch.distributed import distributed_c10d\r\n File \"/nix/store/v9h5iiawvw6y0j03840qxjpqc9nbk4c2-python3-3.10.12-env/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py\", line 16, in <module>\r\n from torch._C._distributed_c10d import (\r\n```\r\n</div></details>\r\n\r\n### Expected behavior\r\n\r\nThis is the line in the issue.\r\nhttps://github.com/huggingface/accelerate/blob/d5894ab4992878c191ed4c9fdb53b35ad2c0e628/src/accelerate/utils/fsdp_utils.py#L23-L24\r\n\r\nI think it would be better to decide whether to import `torch.distributed` by the result of `torch.distributed.is_available()` besides the torch version.\n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport torch\n\nfrom ..logging import get_logger\nfrom .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME\nfrom .versions import is_torch_version\n\n\nif is_torch_version(\">=\", FSDP_PYTORCH_VERSION):\n import torch.distributed.checkpoint as dist_cp\n from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner\n from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n\n\nlogger = get_logger(__name__)\n\n\ndef save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):\n os.makedirs(output_dir, exist_ok=True)\n\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT\n # so, only enable it when num_processes>1\n is_multi_process = accelerator.num_processes > 1\n fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process\n fsdp_plugin.state_dict_config.rank0_only = is_multi_process\n\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n state_dict = model.state_dict()\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n output_model_file = os.path.join(output_dir, weights_name)\n if accelerator.process_index == 0:\n logger.info(f\"Saving model to {output_model_file}\")\n torch.save(state_dict, output_model_file)\n logger.info(f\"Model saved to {output_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:\n weights_name = (\n 
f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n if model_index == 0\n else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n )\n output_model_file = os.path.join(output_dir, weights_name)\n logger.info(f\"Saving model to {output_model_file}\")\n torch.save(state_dict, output_model_file)\n logger.info(f\"Model saved to {output_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:\n ckpt_dir = os.path.join(output_dir, f\"{MODEL_NAME}_{model_index}\")\n os.makedirs(ckpt_dir, exist_ok=True)\n logger.info(f\"Saving model to {ckpt_dir}\")\n state_dict = {\"model\": state_dict}\n\n dist_cp.save_state_dict(\n state_dict=state_dict,\n storage_writer=dist_cp.FileSystemWriter(ckpt_dir),\n planner=DefaultSavePlanner(),\n )\n logger.info(f\"Model saved to {ckpt_dir}\")\n\n\ndef load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):\n accelerator.wait_for_everyone()\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT\n # so, only enable it when num_processes>1\n is_multi_process = accelerator.num_processes > 1\n fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process\n fsdp_plugin.state_dict_config.rank0_only = is_multi_process\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n if type(model) != FSDP and accelerator.process_index != 0:\n if not fsdp_plugin.sync_module_states:\n raise ValueError(\n \"Set the `sync_module_states` flag to `True` so that model states are synced across processes when \"\n \"initializing FSDP object\"\n )\n return\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n input_model_file = os.path.join(input_dir, weights_name)\n logger.info(f\"Loading model from {input_model_file}\")\n state_dict = torch.load(input_model_file)\n logger.info(f\"Model loaded from {input_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:\n weights_name = (\n f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n if model_index == 0\n else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n )\n input_model_file = os.path.join(input_dir, weights_name)\n logger.info(f\"Loading model from {input_model_file}\")\n state_dict = torch.load(input_model_file)\n logger.info(f\"Model loaded from {input_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:\n ckpt_dir = (\n os.path.join(input_dir, f\"{MODEL_NAME}_{model_index}\")\n if f\"{MODEL_NAME}\" not in input_dir\n else input_dir\n )\n logger.info(f\"Loading model from {ckpt_dir}\")\n state_dict = {\"model\": model.state_dict()}\n dist_cp.load_state_dict(\n state_dict=state_dict,\n storage_reader=dist_cp.FileSystemReader(ckpt_dir),\n planner=DefaultLoadPlanner(),\n )\n state_dict = state_dict[\"model\"]\n logger.info(f\"Model loaded from {ckpt_dir}\")\n load_result = model.load_state_dict(state_dict)\n return load_result\n\n\ndef save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):\n os.makedirs(output_dir, exist_ok=True)\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n optim_state = FSDP.optim_state_dict(model, optimizer)\n if 
fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n if accelerator.process_index == 0:\n optim_state_name = (\n f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n )\n output_optimizer_file = os.path.join(output_dir, optim_state_name)\n logger.info(f\"Saving Optimizer state to {output_optimizer_file}\")\n torch.save(optim_state, output_optimizer_file)\n logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n else:\n ckpt_dir = os.path.join(output_dir, f\"{OPTIMIZER_NAME}_{optimizer_index}\")\n os.makedirs(ckpt_dir, exist_ok=True)\n logger.info(f\"Saving Optimizer state to {ckpt_dir}\")\n dist_cp.save_state_dict(\n state_dict={\"optimizer\": optim_state},\n storage_writer=dist_cp.FileSystemWriter(ckpt_dir),\n planner=DefaultSavePlanner(),\n )\n logger.info(f\"Optimizer state saved in {ckpt_dir}\")\n\n\ndef load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):\n accelerator.wait_for_everyone()\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n optim_state = None\n # below check should work but currently it isn't working (mostly opytorch issue),\n # in the meantime disabling it at the cost of excess memory usage\n # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:\n optimizer_name = (\n f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n )\n input_optimizer_file = os.path.join(input_dir, optimizer_name)\n logger.info(f\"Loading Optimizer state from {input_optimizer_file}\")\n optim_state = torch.load(input_optimizer_file)\n logger.info(f\"Optimizer state loaded from {input_optimizer_file}\")\n else:\n ckpt_dir = (\n os.path.join(input_dir, f\"{OPTIMIZER_NAME}_{optimizer_index}\")\n if f\"{OPTIMIZER_NAME}\" not in input_dir\n else input_dir\n )\n logger.info(f\"Loading Optimizer from {ckpt_dir}\")\n optim_state = load_sharded_optimizer_state_dict(\n model_state_dict=model.state_dict(),\n optimizer_key=\"optimizer\",\n storage_reader=dist_cp.FileSystemReader(ckpt_dir),\n )\n optim_state = optim_state[\"optimizer\"]\n logger.info(f\"Optimizer loaded from {ckpt_dir}\")\n flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state)\n optimizer.load_state_dict(flattened_osd)\n", "path": "src/accelerate/utils/fsdp_utils.py"}], "after_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport torch\n\nfrom ..logging import get_logger\nfrom .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME\nfrom .imports import is_torch_distributed_available\nfrom .versions import is_torch_version\n\n\nif is_torch_version(\">=\", FSDP_PYTORCH_VERSION) and is_torch_distributed_available():\n import torch.distributed.checkpoint as dist_cp\n from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner\n from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict\n from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP\n from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType\n\n\nlogger = get_logger(__name__)\n\n\ndef save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):\n os.makedirs(output_dir, exist_ok=True)\n\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT\n # so, only enable it when num_processes>1\n is_multi_process = accelerator.num_processes > 1\n fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process\n fsdp_plugin.state_dict_config.rank0_only = is_multi_process\n\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n state_dict = model.state_dict()\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n output_model_file = os.path.join(output_dir, weights_name)\n if accelerator.process_index == 0:\n logger.info(f\"Saving model to {output_model_file}\")\n torch.save(state_dict, output_model_file)\n logger.info(f\"Model saved to {output_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:\n weights_name = (\n f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n if model_index == 0\n else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n )\n output_model_file = os.path.join(output_dir, weights_name)\n logger.info(f\"Saving model to {output_model_file}\")\n torch.save(state_dict, output_model_file)\n logger.info(f\"Model saved to {output_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:\n ckpt_dir = os.path.join(output_dir, f\"{MODEL_NAME}_{model_index}\")\n os.makedirs(ckpt_dir, exist_ok=True)\n logger.info(f\"Saving model to {ckpt_dir}\")\n state_dict = {\"model\": state_dict}\n\n dist_cp.save_state_dict(\n state_dict=state_dict,\n storage_writer=dist_cp.FileSystemWriter(ckpt_dir),\n planner=DefaultSavePlanner(),\n )\n logger.info(f\"Model saved to {ckpt_dir}\")\n\n\ndef load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):\n accelerator.wait_for_everyone()\n if fsdp_plugin.state_dict_type == 
StateDictType.FULL_STATE_DICT:\n # FSDP raises error when single GPU is used with `offload_to_cpu=True` for FULL_STATE_DICT\n # so, only enable it when num_processes>1\n is_multi_process = accelerator.num_processes > 1\n fsdp_plugin.state_dict_config.offload_to_cpu = is_multi_process\n fsdp_plugin.state_dict_config.rank0_only = is_multi_process\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n if type(model) != FSDP and accelerator.process_index != 0:\n if not fsdp_plugin.sync_module_states:\n raise ValueError(\n \"Set the `sync_module_states` flag to `True` so that model states are synced across processes when \"\n \"initializing FSDP object\"\n )\n return\n weights_name = f\"{MODEL_NAME}.bin\" if model_index == 0 else f\"{MODEL_NAME}_{model_index}.bin\"\n input_model_file = os.path.join(input_dir, weights_name)\n logger.info(f\"Loading model from {input_model_file}\")\n state_dict = torch.load(input_model_file)\n logger.info(f\"Model loaded from {input_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:\n weights_name = (\n f\"{MODEL_NAME}_rank{accelerator.process_index}.bin\"\n if model_index == 0\n else f\"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin\"\n )\n input_model_file = os.path.join(input_dir, weights_name)\n logger.info(f\"Loading model from {input_model_file}\")\n state_dict = torch.load(input_model_file)\n logger.info(f\"Model loaded from {input_model_file}\")\n elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:\n ckpt_dir = (\n os.path.join(input_dir, f\"{MODEL_NAME}_{model_index}\")\n if f\"{MODEL_NAME}\" not in input_dir\n else input_dir\n )\n logger.info(f\"Loading model from {ckpt_dir}\")\n state_dict = {\"model\": model.state_dict()}\n dist_cp.load_state_dict(\n state_dict=state_dict,\n storage_reader=dist_cp.FileSystemReader(ckpt_dir),\n planner=DefaultLoadPlanner(),\n )\n state_dict = state_dict[\"model\"]\n logger.info(f\"Model loaded from {ckpt_dir}\")\n load_result = model.load_state_dict(state_dict)\n return load_result\n\n\ndef save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):\n os.makedirs(output_dir, exist_ok=True)\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n optim_state = FSDP.optim_state_dict(model, optimizer)\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n if accelerator.process_index == 0:\n optim_state_name = (\n f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n )\n output_optimizer_file = os.path.join(output_dir, optim_state_name)\n logger.info(f\"Saving Optimizer state to {output_optimizer_file}\")\n torch.save(optim_state, output_optimizer_file)\n logger.info(f\"Optimizer state saved in {output_optimizer_file}\")\n else:\n ckpt_dir = os.path.join(output_dir, f\"{OPTIMIZER_NAME}_{optimizer_index}\")\n os.makedirs(ckpt_dir, exist_ok=True)\n logger.info(f\"Saving Optimizer state to {ckpt_dir}\")\n dist_cp.save_state_dict(\n state_dict={\"optimizer\": optim_state},\n storage_writer=dist_cp.FileSystemWriter(ckpt_dir),\n planner=DefaultSavePlanner(),\n )\n logger.info(f\"Optimizer state saved in {ckpt_dir}\")\n\n\ndef load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):\n 
accelerator.wait_for_everyone()\n with FSDP.state_dict_type(\n model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config\n ):\n if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:\n optim_state = None\n # below check should work but currently it isn't working (mostly opytorch issue),\n # in the meantime disabling it at the cost of excess memory usage\n # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:\n optimizer_name = (\n f\"{OPTIMIZER_NAME}.bin\" if optimizer_index == 0 else f\"{OPTIMIZER_NAME}_{optimizer_index}.bin\"\n )\n input_optimizer_file = os.path.join(input_dir, optimizer_name)\n logger.info(f\"Loading Optimizer state from {input_optimizer_file}\")\n optim_state = torch.load(input_optimizer_file)\n logger.info(f\"Optimizer state loaded from {input_optimizer_file}\")\n else:\n ckpt_dir = (\n os.path.join(input_dir, f\"{OPTIMIZER_NAME}_{optimizer_index}\")\n if f\"{OPTIMIZER_NAME}\" not in input_dir\n else input_dir\n )\n logger.info(f\"Loading Optimizer from {ckpt_dir}\")\n optim_state = load_sharded_optimizer_state_dict(\n model_state_dict=model.state_dict(),\n optimizer_key=\"optimizer\",\n storage_reader=dist_cp.FileSystemReader(ckpt_dir),\n )\n optim_state = optim_state[\"optimizer\"]\n logger.info(f\"Optimizer loaded from {ckpt_dir}\")\n flattened_osd = FSDP.optim_state_dict_to_load(model=model, optim=optimizer, optim_state_dict=optim_state)\n optimizer.load_state_dict(flattened_osd)\n", "path": "src/accelerate/utils/fsdp_utils.py"}]} | 4,056 | 199 |
gh_patches_debug_16042 | rasdani/github-patches | git_diff | NVIDIA__apex-331 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Does Apex really support PyTorch 0.4.1?
In Readme.md it says:
> PyTorch 0.4 or newer.
But the sample code in [examples/simple/distributed](https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed) fails with `AttributeError: 'Linear' object has no attribute 'named_buffers'`, because the `named_buffers` method of `torch.nn.Module` was only introduced after version 0.4.1 (a short reproduction sketch follows this issue block).
Is there any build that supports PyTorch 0.4.1? If not, maybe the README.md should at least be updated?
P.S.: I'm using apex with CUDA and C++ libraries, installed with command `pip install -v --no-cache-dir .`
--- END ISSUE ---
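As referenced in the issue above, a minimal reproduction sketch (hypothetical snippet, not taken from the Apex repository): on PyTorch 0.4.1 the `named_buffers` method is reported missing, while the module's internal `_buffers` dict is still available.

```python
# Hypothetical reproduction/workaround sketch; assumes named_buffers is absent on
# PyTorch 0.4.1 (as the issue reports) and present on newer releases.
import torch

model = torch.nn.Linear(4, 4)
if hasattr(model, "named_buffers"):
    buffers = dict(model.named_buffers())   # newer PyTorch
else:
    buffers = dict(model._buffers)          # PyTorch 0.4.x fallback (private OrderedDict)
print(sorted(buffers))                      # Linear has no buffers, so this prints []
```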
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apex/amp/_initialize.py`
Content:
```
1 import torch
2 from torch._six import string_classes
3 import functools
4 import numpy as np
5 import warnings
6 from ._amp_state import _amp_state, warn_or_err, container_abcs
7 from .handle import disable_casts
8 from .scaler import LossScaler
9 from ._process_optimizer import _process_optimizer
10 from apex.fp16_utils import convert_network
11 from ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general
12 from ..optimizers import FP16_Optimizer as FP16_Optimizer_for_fused
13 from ..optimizers import FusedAdam
14 from ..parallel import DistributedDataParallel as apex_DDP
15 from ..parallel.LARC import LARC
16
17
18 def to_type(dtype, t):
19 if isinstance(t, torch.Tensor):
20 if not t.is_cuda:
21 # This should not be a hard error, since it may be legitimate.
22 warnings.warn("An input tensor was not cuda.")
23 # GANs require this.
24 # if t.requires_grad:
25 # warn_or_err("input data requires grad. Since input data is not a model parameter,\n"
26 # "its gradients will not be properly allreduced by DDP.")
27 if t.is_floating_point():
28 return t.to(dtype)
29 return t
30 else:
31 # Trust the user's custom batch type, that's all I can do here.
32 return t.to(dtype)
33
34
35 # Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py.
36 def applier(value, fn):
37 if isinstance(value, torch.Tensor):
38 return fn(value)
39 elif isinstance(value, string_classes):
40 return value
41 elif isinstance(value, np.ndarray):
42 return value
43 elif isinstance(value, container_abcs.Mapping):
44 return {applier(k, fn) : applier(v, fn) for k, v in value.items()}
45 elif isinstance(value, container_abcs.Iterable):
46 return type(value)(applier(v, fn) for v in value)
47 elif hasattr(value, "to"): # Allow handling of custom batch classes
48 return fn(value)
49 else:
50 # Do I want this to fire off even if someone chooses to pass something ordinary like
51 # an int or float? May be more annoying than it's worth.
52 # print("Warning: unrecognized type in applier. If your input data is a custom class, "
53 # "provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. "
54 # "Amp will check for your custom to() and invoke it to cast the batch's "
55 # "floating-point Tensors to the appropriate type. "
56 # "Also, if your data is a custom class, it is your responsibility to ensure that "
57 # "any Tensors you want to be cuda are already cuda."
58 return value
59
60
61 def check_models(models):
62 for model in models:
63 parallel_type = None
64 if isinstance(model, torch.nn.parallel.DistributedDataParallel):
65 parallel_type = "torch.nn.parallel.DistributedDataParallel"
66 if isinstance(model, apex_DDP):
67 parallel_type = "apex.parallel.DistributedDataParallel"
68 if isinstance(model, torch.nn.parallel.DataParallel):
69 parallel_type = "torch.nn.parallel.DataParallel"
70 if parallel_type is not None:
71 raise RuntimeError("Incoming model is an instance of {}. ".format(parallel_type) +
72 "Parallel wrappers should only be applied to the model(s) AFTER \n"
73 "the model(s) have been returned from amp.initialize.")
74
75
76 def check_params_fp32(models):
77 for model in models:
78 for name, param in model.named_parameters():
79 if param.is_floating_point():
80 if 'Half' in param.type():
81 warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n"
82 "When using amp.initialize, you do not need to call .half() on your model\n"
83 "before passing it, no matter what optimization level you choose.".format(
84 name, param.type()))
85 elif not param.is_cuda:
86 warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n"
87 "When using amp.initialize, you need to provide a model with parameters\n"
88 "located on a CUDA device before passing it no matter what optimization level\n"
89 "you chose. Use model.to('cuda') to use the default device.".format(
90 name, param.type()))
91
92 for name, buf in model.named_buffers():
93 if buf.is_floating_point():
94 if 'Half' in buf.type():
95 warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n"
96 "When using amp.initialize, you do not need to call .half() on your model\n"
97 "before passing it, no matter what optimization level you choose.".format(
98 name, buf.type()))
99 elif not buf.is_cuda:
100 warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n"
101 "When using amp.initialize, you need to provide a model with buffers\n"
102 "located on a CUDA device before passing it no matter what optimization level\n"
103 "you chose. Use model.to('cuda') to use the default device.".format(
104 name, buf.type()))
105
106
107 def check_optimizers(optimizers):
108 for optim in optimizers:
109 bad_optim_type = None
110 if isinstance(optim, FP16_Optimizer_general):
111 bad_optim_type = "apex.fp16_utils.FP16_Optimizer"
112 if isinstance(optim, FP16_Optimizer_for_fused):
113 bad_optim_type = "apex.optimizers.FP16_Optimizer"
114 if bad_optim_type is not None:
115 raise RuntimeError("An incoming optimizer is an instance of {}. ".format(bad_optim_type) +
116 "The optimizer(s) passed to amp.initialize() must be bare \n"
117 "instances of either ordinary Pytorch optimizers, or Apex fused \n"
118 "optimizers (currently just FusedAdam, but FusedSGD will be added \n"
119 "soon). You should not manually wrap your optimizer in either \n"
120 "apex.fp16_utils.FP16_Optimizer or apex.optimizers.FP16_Optimizer. \n"
121 "amp.initialize will take care of that for you (if necessary) based \n"
122 "on the specified opt_level (and optional overridden properties).")
123
124
125 def wrap_fused_adam(optimizer, properties):
126 msg = 'Currently, the usage of FusedAdam is restricted to '\
127 'amp.initialize(..., opt_level="O2", keep_batchnorm_fp32=False, '\
128 'loss_scale=float or "dynamic"). We are working on enabling more general usage.'
129
130 assert properties.master_weights is True, msg
131 assert properties.cast_model_type is torch.float16, msg
132 assert (properties.keep_batchnorm_fp32 is False or
133 properties.keep_batchnorm_fp32 is None), msg
134
135 if properties.loss_scale == "dynamic":
136 return FP16_Optimizer_for_fused(optimizer, dynamic_loss_scale=True)
137 else:
138 return FP16_Optimizer_for_fused(optimizer, static_loss_scale=properties.loss_scale)
139
140
141 def _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):
142 from apex.parallel import DistributedDataParallel as apex_DDP
143 from .amp import init as amp_init
144
145 optimizers_was_list = False
146 if isinstance(optimizers, torch.optim.Optimizer) or isinstance(optimizers, LARC):
147 optimizers = [optimizers]
148 elif optimizers is None:
149 optimizers = []
150 elif isinstance(optimizers, list):
151 optimizers_was_list = True
152 check_optimizers(optimizers)
153 else:
154 check_optimizers([optimizers])
155 raise TypeError("optimizers must be either a single optimizer or a list of optimizers.")
156
157 if isinstance(models, torch.nn.Module):
158 models_was_list = False
159 models = [models]
160 elif isinstance(models, list):
161 models_was_list = True
162 else:
163 raise TypeError("models must be either a single model or a list of models.")
164
165 check_models(models)
166
167 if not _amp_state.allow_incoming_model_not_fp32:
168 check_params_fp32(models)
169
170
171 # In the future, when FP16_Optimizer can be deprecated and master weights can
172 # become an attribute, remember to stash master weights before casting the model.
173
174 if properties.cast_model_type:
175 if properties.keep_batchnorm_fp32:
176 for model in models:
177 convert_network(model, properties.cast_model_type)
178 else:
179 for model in models:
180 model.to(properties.cast_model_type)
181
182 input_caster = functools.partial(to_type, properties.cast_model_type)
183 if cast_model_outputs is not None:
184 output_caster = functools.partial(to_type, cast_model_outputs)
185 else:
186 output_caster = functools.partial(to_type, torch.float32)
187
188 for model in models:
189 # Patch the forward method to cast incoming data to the correct type, and
190 # outgoing data to float32, so "the user never needs to call .half()."
191 # I like writing things explicitly more than decorators.
192 def patch_forward(old_fwd):
193 def new_fwd(*args, **kwargs):
194 output = old_fwd(*applier(args, input_caster),
195 **applier(kwargs, input_caster))
196 return applier(output, output_caster)
197 return new_fwd
198
199 model.forward = patch_forward(model.forward)
200
201 # State dict trick to recast any preexisting per-param state tensors
202 for optimizer in optimizers:
203 optimizer.load_state_dict(optimizer.state_dict())
204 elif cast_model_outputs is not None:
205 output_caster = functools.partial(to_type, cast_model_outputs)
206
207 for model in models:
208 def patch_forward(old_fwd):
209 def new_fwd(*args, **kwargs):
210 output = old_fwd(*args, **kwargs)
211 return applier(output, output_caster)
212 return new_fwd
213
214 model.forward = patch_forward(model.forward)
215
216 for i, optimizer in enumerate(optimizers):
217 # Still need to special case this for the first pass
218 if isinstance(optimizer, FusedAdam):
219 optimizers[i] = wrap_fused_adam(optimizer, properties)
220 else:
221 optimizers[i] = _process_optimizer(optimizer, properties)
222
223 _amp_state.loss_scalers = []
224 for _ in range(num_losses):
225 _amp_state.loss_scalers.append(LossScaler(properties.loss_scale))
226
227 if properties.patch_torch_functions:
228 # handle is unused here. It's accessible later through a global value anyway.
229 handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))
230 for optimizer in optimizers:
231 # Disable Amp casting for the optimizer step, because it should only be
232 # applied to FP32 master params anyway.
233 def patch_step(old_step):
234 def new_step(*args, **kwargs):
235 with disable_casts():
236 output = old_step(*args, **kwargs)
237 return output
238 return new_step
239
240 optimizer.step = patch_step(optimizer.step)
241
242 if optimizers_was_list:
243 if models_was_list:
244 return models, optimizers
245 else:
246 return models[0], optimizers
247 else:
248 if models_was_list:
249 if len(optimizers) == 0:
250 return models
251 else:
252 return models, optimizers[0]
253 else:
254 if len(optimizers) == 0:
255 return models[0]
256 else:
257 return models[0], optimizers[0]
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apex/amp/_initialize.py b/apex/amp/_initialize.py
--- a/apex/amp/_initialize.py
+++ b/apex/amp/_initialize.py
@@ -89,7 +89,16 @@
"you chose. Use model.to('cuda') to use the default device.".format(
name, param.type()))
- for name, buf in model.named_buffers():
+ # Backward compatibility for PyTorch 0.4
+ if hasattr(model, 'named_buffers'):
+ buf_iter = model.named_buffers()
+ else:
+ buf_iter = model._buffers
+ for obj in buf_iter:
+ if type(obj)==tuple:
+ name, buf = obj
+ else:
+ name, buf = obj, buf_iter[obj]
if buf.is_floating_point():
if 'Half' in buf.type():
warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n"
| {"golden_diff": "diff --git a/apex/amp/_initialize.py b/apex/amp/_initialize.py\n--- a/apex/amp/_initialize.py\n+++ b/apex/amp/_initialize.py\n@@ -89,7 +89,16 @@\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, param.type()))\n \n- for name, buf in model.named_buffers():\n+ # Backward compatibility for PyTorch 0.4\n+ if hasattr(model, 'named_buffers'):\n+ buf_iter = model.named_buffers()\n+ else:\n+ buf_iter = model._buffers\n+ for obj in buf_iter:\n+ if type(obj)==tuple:\n+ name, buf = obj\n+ else:\n+ name, buf = obj, buf_iter[obj]\n if buf.is_floating_point():\n if 'Half' in buf.type():\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n", "issue": "Does Apex really support PyTorch 0.4.1?\nIn Readme.md it says:\r\n\r\n> PyTorch 0.4 or newer.\r\n\r\nBut sample code [examples/simple/distributed](https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed) goes down with error `AttributeError: 'Linear' object has no attribute 'named_buffers'`, because `named_buffers` method of torch.Module was introduced after 0.4.1 version.\r\n\r\nIs there any build, that supports pytorch 0.4.1? If not, at least maybe the readme.md should be changed?\r\n\r\n\r\nP.S.: I'm using apex with CUDA and C++ libraries, installed with command `pip install -v --no-cache-dir .`\n", "before_files": [{"content": "import torch\nfrom torch._six import string_classes\nimport functools\nimport numpy as np\nimport warnings\nfrom ._amp_state import _amp_state, warn_or_err, container_abcs\nfrom .handle import disable_casts\nfrom .scaler import LossScaler\nfrom ._process_optimizer import _process_optimizer\nfrom apex.fp16_utils import convert_network\nfrom ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general\nfrom ..optimizers import FP16_Optimizer as FP16_Optimizer_for_fused\nfrom ..optimizers import FusedAdam\nfrom ..parallel import DistributedDataParallel as apex_DDP\nfrom ..parallel.LARC import LARC\n\n\ndef to_type(dtype, t):\n if isinstance(t, torch.Tensor):\n if not t.is_cuda:\n # This should not be a hard error, since it may be legitimate.\n warnings.warn(\"An input tensor was not cuda.\")\n # GANs require this.\n # if t.requires_grad:\n # warn_or_err(\"input data requires grad. Since input data is not a model parameter,\\n\"\n # \"its gradients will not be properly allreduced by DDP.\")\n if t.is_floating_point():\n return t.to(dtype)\n return t\n else:\n # Trust the user's custom batch type, that's all I can do here.\n return t.to(dtype)\n\n\n# Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py.\ndef applier(value, fn):\n if isinstance(value, torch.Tensor):\n return fn(value)\n elif isinstance(value, string_classes):\n return value\n elif isinstance(value, np.ndarray):\n return value\n elif isinstance(value, container_abcs.Mapping):\n return {applier(k, fn) : applier(v, fn) for k, v in value.items()}\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(applier(v, fn) for v in value)\n elif hasattr(value, \"to\"): # Allow handling of custom batch classes\n return fn(value)\n else:\n # Do I want this to fire off even if someone chooses to pass something ordinary like\n # an int or float? May be more annoying than it's worth.\n # print(\"Warning: unrecognized type in applier. If your input data is a custom class, \"\n # \"provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. 
\"\n # \"Amp will check for your custom to() and invoke it to cast the batch's \"\n # \"floating-point Tensors to the appropriate type. \"\n # \"Also, if your data is a custom class, it is your responsibility to ensure that \"\n # \"any Tensors you want to be cuda are already cuda.\"\n return value\n\n\ndef check_models(models):\n for model in models:\n parallel_type = None\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n parallel_type = \"torch.nn.parallel.DistributedDataParallel\"\n if isinstance(model, apex_DDP):\n parallel_type = \"apex.parallel.DistributedDataParallel\"\n if isinstance(model, torch.nn.parallel.DataParallel):\n parallel_type = \"torch.nn.parallel.DataParallel\"\n if parallel_type is not None:\n raise RuntimeError(\"Incoming model is an instance of {}. \".format(parallel_type) +\n \"Parallel wrappers should only be applied to the model(s) AFTER \\n\"\n \"the model(s) have been returned from amp.initialize.\")\n\n\ndef check_params_fp32(models):\n for model in models:\n for name, param in model.named_parameters():\n if param.is_floating_point():\n if 'Half' in param.type():\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, param.type()))\n elif not param.is_cuda:\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with parameters\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, param.type()))\n\n for name, buf in model.named_buffers():\n if buf.is_floating_point():\n if 'Half' in buf.type():\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, buf.type()))\n elif not buf.is_cuda:\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with buffers\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, buf.type()))\n\n\ndef check_optimizers(optimizers):\n for optim in optimizers:\n bad_optim_type = None\n if isinstance(optim, FP16_Optimizer_general):\n bad_optim_type = \"apex.fp16_utils.FP16_Optimizer\"\n if isinstance(optim, FP16_Optimizer_for_fused):\n bad_optim_type = \"apex.optimizers.FP16_Optimizer\"\n if bad_optim_type is not None:\n raise RuntimeError(\"An incoming optimizer is an instance of {}. \".format(bad_optim_type) +\n \"The optimizer(s) passed to amp.initialize() must be bare \\n\"\n \"instances of either ordinary Pytorch optimizers, or Apex fused \\n\"\n \"optimizers (currently just FusedAdam, but FusedSGD will be added \\n\"\n \"soon). You should not manually wrap your optimizer in either \\n\"\n \"apex.fp16_utils.FP16_Optimizer or apex.optimizers.FP16_Optimizer. 
\\n\"\n \"amp.initialize will take care of that for you (if necessary) based \\n\"\n \"on the specified opt_level (and optional overridden properties).\")\n\n\ndef wrap_fused_adam(optimizer, properties):\n msg = 'Currently, the usage of FusedAdam is restricted to '\\\n 'amp.initialize(..., opt_level=\"O2\", keep_batchnorm_fp32=False, '\\\n 'loss_scale=float or \"dynamic\"). We are working on enabling more general usage.'\n\n assert properties.master_weights is True, msg\n assert properties.cast_model_type is torch.float16, msg\n assert (properties.keep_batchnorm_fp32 is False or\n properties.keep_batchnorm_fp32 is None), msg\n\n if properties.loss_scale == \"dynamic\":\n return FP16_Optimizer_for_fused(optimizer, dynamic_loss_scale=True)\n else:\n return FP16_Optimizer_for_fused(optimizer, static_loss_scale=properties.loss_scale)\n\n\ndef _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):\n from apex.parallel import DistributedDataParallel as apex_DDP\n from .amp import init as amp_init\n\n optimizers_was_list = False\n if isinstance(optimizers, torch.optim.Optimizer) or isinstance(optimizers, LARC):\n optimizers = [optimizers]\n elif optimizers is None:\n optimizers = []\n elif isinstance(optimizers, list):\n optimizers_was_list = True\n check_optimizers(optimizers)\n else:\n check_optimizers([optimizers])\n raise TypeError(\"optimizers must be either a single optimizer or a list of optimizers.\")\n\n if isinstance(models, torch.nn.Module):\n models_was_list = False\n models = [models]\n elif isinstance(models, list):\n models_was_list = True\n else:\n raise TypeError(\"models must be either a single model or a list of models.\")\n\n check_models(models)\n\n if not _amp_state.allow_incoming_model_not_fp32:\n check_params_fp32(models)\n\n\n # In the future, when FP16_Optimizer can be deprecated and master weights can\n # become an attribute, remember to stash master weights before casting the model.\n\n if properties.cast_model_type:\n if properties.keep_batchnorm_fp32:\n for model in models:\n convert_network(model, properties.cast_model_type)\n else:\n for model in models:\n model.to(properties.cast_model_type)\n\n input_caster = functools.partial(to_type, properties.cast_model_type)\n if cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n else:\n output_caster = functools.partial(to_type, torch.float32)\n\n for model in models:\n # Patch the forward method to cast incoming data to the correct type, and\n # outgoing data to float32, so \"the user never needs to call .half().\"\n # I like writing things explicitly more than decorators.\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*applier(args, input_caster),\n **applier(kwargs, input_caster))\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n # State dict trick to recast any preexisting per-param state tensors \n for optimizer in optimizers:\n optimizer.load_state_dict(optimizer.state_dict())\n elif cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n\n for model in models:\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*args, **kwargs)\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n for i, optimizer in enumerate(optimizers):\n # Still need to special case this for the first pass\n if isinstance(optimizer, FusedAdam):\n 
optimizers[i] = wrap_fused_adam(optimizer, properties)\n else:\n optimizers[i] = _process_optimizer(optimizer, properties)\n\n _amp_state.loss_scalers = []\n for _ in range(num_losses):\n _amp_state.loss_scalers.append(LossScaler(properties.loss_scale))\n\n if properties.patch_torch_functions:\n # handle is unused here. It's accessible later through a global value anyway.\n handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))\n for optimizer in optimizers:\n # Disable Amp casting for the optimizer step, because it should only be\n # applied to FP32 master params anyway.\n def patch_step(old_step):\n def new_step(*args, **kwargs):\n with disable_casts():\n output = old_step(*args, **kwargs)\n return output\n return new_step\n\n optimizer.step = patch_step(optimizer.step)\n\n if optimizers_was_list:\n if models_was_list:\n return models, optimizers\n else:\n return models[0], optimizers\n else:\n if models_was_list:\n if len(optimizers) == 0:\n return models\n else:\n return models, optimizers[0]\n else:\n if len(optimizers) == 0:\n return models[0]\n else:\n return models[0], optimizers[0]\n", "path": "apex/amp/_initialize.py"}], "after_files": [{"content": "import torch\nfrom torch._six import string_classes\nimport functools\nimport numpy as np\nimport warnings\nfrom ._amp_state import _amp_state, warn_or_err, container_abcs\nfrom .handle import disable_casts\nfrom .scaler import LossScaler\nfrom ._process_optimizer import _process_optimizer\nfrom apex.fp16_utils import convert_network\nfrom ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general\nfrom ..optimizers import FP16_Optimizer as FP16_Optimizer_for_fused\nfrom ..optimizers import FusedAdam\nfrom ..parallel import DistributedDataParallel as apex_DDP\nfrom ..parallel.LARC import LARC\n\n\ndef to_type(dtype, t):\n if isinstance(t, torch.Tensor):\n if not t.is_cuda:\n # This should not be a hard error, since it may be legitimate.\n warnings.warn(\"An input tensor was not cuda.\")\n # GANs require this.\n # if t.requires_grad:\n # warn_or_err(\"input data requires grad. Since input data is not a model parameter,\\n\"\n # \"its gradients will not be properly allreduced by DDP.\")\n if t.is_floating_point():\n return t.to(dtype)\n return t\n else:\n # Trust the user's custom batch type, that's all I can do here.\n return t.to(dtype)\n\n\n# Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py.\ndef applier(value, fn):\n if isinstance(value, torch.Tensor):\n return fn(value)\n elif isinstance(value, string_classes):\n return value\n elif isinstance(value, np.ndarray):\n return value\n elif isinstance(value, container_abcs.Mapping):\n return {applier(k, fn) : applier(v, fn) for k, v in value.items()}\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(applier(v, fn) for v in value)\n elif hasattr(value, \"to\"): # Allow handling of custom batch classes\n return fn(value)\n else:\n # Do I want this to fire off even if someone chooses to pass something ordinary like\n # an int or float? May be more annoying than it's worth.\n # print(\"Warning: unrecognized type in applier. If your input data is a custom class, \"\n # \"provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. \"\n # \"Amp will check for your custom to() and invoke it to cast the batch's \"\n # \"floating-point Tensors to the appropriate type. 
\"\n # \"Also, if your data is a custom class, it is your responsibility to ensure that \"\n # \"any Tensors you want to be cuda are already cuda.\"\n return value\n\n\ndef check_models(models):\n for model in models:\n parallel_type = None\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n parallel_type = \"torch.nn.parallel.DistributedDataParallel\"\n if isinstance(model, apex_DDP):\n parallel_type = \"apex.parallel.DistributedDataParallel\"\n if isinstance(model, torch.nn.parallel.DataParallel):\n parallel_type = \"torch.nn.parallel.DataParallel\"\n if parallel_type is not None:\n raise RuntimeError(\"Incoming model is an instance of {}. \".format(parallel_type) +\n \"Parallel wrappers should only be applied to the model(s) AFTER \\n\"\n \"the model(s) have been returned from amp.initialize.\")\n\n\ndef check_params_fp32(models):\n for model in models:\n for name, param in model.named_parameters():\n if param.is_floating_point():\n if 'Half' in param.type():\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, param.type()))\n elif not param.is_cuda:\n warn_or_err(\"Found param {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with parameters\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, param.type()))\n\n # Backward compatibility for PyTorch 0.4\n if hasattr(model, 'named_buffers'):\n buf_iter = model.named_buffers()\n else:\n buf_iter = model._buffers\n for obj in buf_iter:\n if type(obj)==tuple:\n name, buf = obj\n else:\n name, buf = obj, buf_iter[obj]\n if buf.is_floating_point():\n if 'Half' in buf.type():\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you do not need to call .half() on your model\\n\"\n \"before passing it, no matter what optimization level you choose.\".format(\n name, buf.type()))\n elif not buf.is_cuda:\n warn_or_err(\"Found buffer {} with type {}, expected torch.cuda.FloatTensor.\\n\"\n \"When using amp.initialize, you need to provide a model with buffers\\n\"\n \"located on a CUDA device before passing it no matter what optimization level\\n\"\n \"you chose. Use model.to('cuda') to use the default device.\".format(\n name, buf.type()))\n\n\ndef check_optimizers(optimizers):\n for optim in optimizers:\n bad_optim_type = None\n if isinstance(optim, FP16_Optimizer_general):\n bad_optim_type = \"apex.fp16_utils.FP16_Optimizer\"\n if isinstance(optim, FP16_Optimizer_for_fused):\n bad_optim_type = \"apex.optimizers.FP16_Optimizer\"\n if bad_optim_type is not None:\n raise RuntimeError(\"An incoming optimizer is an instance of {}. \".format(bad_optim_type) +\n \"The optimizer(s) passed to amp.initialize() must be bare \\n\"\n \"instances of either ordinary Pytorch optimizers, or Apex fused \\n\"\n \"optimizers (currently just FusedAdam, but FusedSGD will be added \\n\"\n \"soon). You should not manually wrap your optimizer in either \\n\"\n \"apex.fp16_utils.FP16_Optimizer or apex.optimizers.FP16_Optimizer. 
\\n\"\n \"amp.initialize will take care of that for you (if necessary) based \\n\"\n \"on the specified opt_level (and optional overridden properties).\")\n\n\ndef wrap_fused_adam(optimizer, properties):\n msg = 'Currently, the usage of FusedAdam is restricted to '\\\n 'amp.initialize(..., opt_level=\"O2\", keep_batchnorm_fp32=False, '\\\n 'loss_scale=float or \"dynamic\"). We are working on enabling more general usage.'\n\n assert properties.master_weights is True, msg\n assert properties.cast_model_type is torch.float16, msg\n assert (properties.keep_batchnorm_fp32 is False or\n properties.keep_batchnorm_fp32 is None), msg\n\n if properties.loss_scale == \"dynamic\":\n return FP16_Optimizer_for_fused(optimizer, dynamic_loss_scale=True)\n else:\n return FP16_Optimizer_for_fused(optimizer, static_loss_scale=properties.loss_scale)\n\n\ndef _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None):\n from apex.parallel import DistributedDataParallel as apex_DDP\n from .amp import init as amp_init\n\n optimizers_was_list = False\n if isinstance(optimizers, torch.optim.Optimizer) or isinstance(optimizers, LARC):\n optimizers = [optimizers]\n elif optimizers is None:\n optimizers = []\n elif isinstance(optimizers, list):\n optimizers_was_list = True\n check_optimizers(optimizers)\n else:\n check_optimizers([optimizers])\n raise TypeError(\"optimizers must be either a single optimizer or a list of optimizers.\")\n\n if isinstance(models, torch.nn.Module):\n models_was_list = False\n models = [models]\n elif isinstance(models, list):\n models_was_list = True\n else:\n raise TypeError(\"models must be either a single model or a list of models.\")\n\n check_models(models)\n\n if not _amp_state.allow_incoming_model_not_fp32:\n check_params_fp32(models)\n\n\n # In the future, when FP16_Optimizer can be deprecated and master weights can\n # become an attribute, remember to stash master weights before casting the model.\n\n if properties.cast_model_type:\n if properties.keep_batchnorm_fp32:\n for model in models:\n convert_network(model, properties.cast_model_type)\n else:\n for model in models:\n model.to(properties.cast_model_type)\n\n input_caster = functools.partial(to_type, properties.cast_model_type)\n if cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n else:\n output_caster = functools.partial(to_type, torch.float32)\n\n for model in models:\n # Patch the forward method to cast incoming data to the correct type, and\n # outgoing data to float32, so \"the user never needs to call .half().\"\n # I like writing things explicitly more than decorators.\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*applier(args, input_caster),\n **applier(kwargs, input_caster))\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n # State dict trick to recast any preexisting per-param state tensors \n for optimizer in optimizers:\n optimizer.load_state_dict(optimizer.state_dict())\n elif cast_model_outputs is not None:\n output_caster = functools.partial(to_type, cast_model_outputs)\n\n for model in models:\n def patch_forward(old_fwd):\n def new_fwd(*args, **kwargs):\n output = old_fwd(*args, **kwargs)\n return applier(output, output_caster)\n return new_fwd\n\n model.forward = patch_forward(model.forward)\n\n for i, optimizer in enumerate(optimizers):\n # Still need to special case this for the first pass\n if isinstance(optimizer, FusedAdam):\n 
optimizers[i] = wrap_fused_adam(optimizer, properties)\n else:\n optimizers[i] = _process_optimizer(optimizer, properties)\n\n _amp_state.loss_scalers = []\n for _ in range(num_losses):\n _amp_state.loss_scalers.append(LossScaler(properties.loss_scale))\n\n if properties.patch_torch_functions:\n # handle is unused here. It's accessible later through a global value anyway.\n handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))\n for optimizer in optimizers:\n # Disable Amp casting for the optimizer step, because it should only be\n # applied to FP32 master params anyway.\n def patch_step(old_step):\n def new_step(*args, **kwargs):\n with disable_casts():\n output = old_step(*args, **kwargs)\n return output\n return new_step\n\n optimizer.step = patch_step(optimizer.step)\n\n if optimizers_was_list:\n if models_was_list:\n return models, optimizers\n else:\n return models[0], optimizers\n else:\n if models_was_list:\n if len(optimizers) == 0:\n return models\n else:\n return models, optimizers[0]\n else:\n if len(optimizers) == 0:\n return models[0]\n else:\n return models[0], optimizers[0]\n", "path": "apex/amp/_initialize.py"}]} | 3,602 | 212 |
gh_patches_debug_15237 | rasdani/github-patches | git_diff | rlworkgroup__garage-691 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sim_policy not working
Hi,
I just found that sim_policy.py does not work.
The data read from "params.pkl" does not include the key "policy".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/sim_policy.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import argparse
4
5 import joblib
6 import tensorflow as tf
7
8 from garage.misc.console import query_yes_no
9 from garage.sampler.utils import rollout
10
11 if __name__ == "__main__":
12
13 parser = argparse.ArgumentParser()
14 parser.add_argument('file', type=str, help='path to the snapshot file')
15 parser.add_argument(
16 '--max_path_length',
17 type=int,
18 default=1000,
19 help='Max length of rollout')
20 parser.add_argument('--speedup', type=float, default=1, help='Speedup')
21 args = parser.parse_args()
22
23 # If the snapshot file use tensorflow, do:
24 # import tensorflow as tf
25 # with tf.Session():
26 # [rest of the code]
27 with tf.Session() as sess:
28 data = joblib.load(args.file)
29 policy = data['policy']
30 env = data['env']
31 while True:
32 path = rollout(
33 env,
34 policy,
35 max_path_length=args.max_path_length,
36 animated=True,
37 speedup=args.speedup)
38 if not query_yes_no('Continue simulation?'):
39 break
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/sim_policy.py b/examples/sim_policy.py
--- a/examples/sim_policy.py
+++ b/examples/sim_policy.py
@@ -8,7 +8,7 @@
from garage.misc.console import query_yes_no
from garage.sampler.utils import rollout
-if __name__ == "__main__":
+if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='path to the snapshot file')
@@ -26,7 +26,7 @@
# [rest of the code]
with tf.Session() as sess:
data = joblib.load(args.file)
- policy = data['policy']
+ policy = data['algo'].policy
env = data['env']
while True:
path = rollout(
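
For context, a minimal sketch of what loading a snapshot looks like after this patch, assuming the same `rollout` helper as in the file above; the snapshot path and rollout length are placeholders, not values from the record.

```python
import joblib
import tensorflow as tf

from garage.sampler.utils import rollout

SNAPSHOT = 'params.pkl'  # placeholder path to a saved snapshot

with tf.Session():
    data = joblib.load(SNAPSHOT)
    # The snapshot stores the algorithm under 'algo'; the policy hangs off it.
    policy = data['algo'].policy
    env = data['env']
    path = rollout(env, policy, max_path_length=1000, animated=True)
```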
| {"golden_diff": "diff --git a/examples/sim_policy.py b/examples/sim_policy.py\n--- a/examples/sim_policy.py\n+++ b/examples/sim_policy.py\n@@ -8,7 +8,7 @@\n from garage.misc.console import query_yes_no\n from garage.sampler.utils import rollout\n \n-if __name__ == \"__main__\":\n+if __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='path to the snapshot file')\n@@ -26,7 +26,7 @@\n # [rest of the code]\n with tf.Session() as sess:\n data = joblib.load(args.file)\n- policy = data['policy']\n+ policy = data['algo'].policy\n env = data['env']\n while True:\n path = rollout(\n", "issue": "sim_policy not working\nHi, \r\nI just found that sim_policy.py cannot work. \r\ndata that read from \"params.pkl\" does not include the key of \"policy\"\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\n\nimport joblib\nimport tensorflow as tf\n\nfrom garage.misc.console import query_yes_no\nfrom garage.sampler.utils import rollout\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='path to the snapshot file')\n parser.add_argument(\n '--max_path_length',\n type=int,\n default=1000,\n help='Max length of rollout')\n parser.add_argument('--speedup', type=float, default=1, help='Speedup')\n args = parser.parse_args()\n\n # If the snapshot file use tensorflow, do:\n # import tensorflow as tf\n # with tf.Session():\n # [rest of the code]\n with tf.Session() as sess:\n data = joblib.load(args.file)\n policy = data['policy']\n env = data['env']\n while True:\n path = rollout(\n env,\n policy,\n max_path_length=args.max_path_length,\n animated=True,\n speedup=args.speedup)\n if not query_yes_no('Continue simulation?'):\n break\n", "path": "examples/sim_policy.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\n\nimport joblib\nimport tensorflow as tf\n\nfrom garage.misc.console import query_yes_no\nfrom garage.sampler.utils import rollout\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('file', type=str, help='path to the snapshot file')\n parser.add_argument(\n '--max_path_length',\n type=int,\n default=1000,\n help='Max length of rollout')\n parser.add_argument('--speedup', type=float, default=1, help='Speedup')\n args = parser.parse_args()\n\n # If the snapshot file use tensorflow, do:\n # import tensorflow as tf\n # with tf.Session():\n # [rest of the code]\n with tf.Session() as sess:\n data = joblib.load(args.file)\n policy = data['algo'].policy\n env = data['env']\n while True:\n path = rollout(\n env,\n policy,\n max_path_length=args.max_path_length,\n animated=True,\n speedup=args.speedup)\n if not query_yes_no('Continue simulation?'):\n break\n", "path": "examples/sim_policy.py"}]} | 612 | 173 |
gh_patches_debug_30229 | rasdani/github-patches | git_diff | arviz-devs__arviz-746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more fitted models
figshare (https://figshare.com/) seems like a possibility; I think scikit-learn uses it. In terms of size, [the radon example here](https://docs.pymc.io/notebooks/multilevel_modeling.html) generates a netCDF file of about 15 MB, which is not so bad.
Perhaps it could even be hosted in a separate github repo?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/data/datasets.py`
Content:
```
1 """Base IO code for all datasets. Heavily influenced by scikit-learn's implementation."""
2 from collections import namedtuple
3 import hashlib
4 import itertools
5 import os
6 import shutil
7 from urllib.request import urlretrieve
8
9 from .io_netcdf import from_netcdf
10
11 LocalFileMetadata = namedtuple("LocalFileMetadata", ["filename", "description"])
12
13 RemoteFileMetadata = namedtuple(
14 "RemoteFileMetadata", ["filename", "url", "checksum", "description"]
15 )
16 _DATASET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_datasets")
17
18 LOCAL_DATASETS = {
19 "centered_eight": LocalFileMetadata(
20 filename=os.path.join(_DATASET_DIR, "centered_eight.nc"),
21 description="""
22 A centered parameterization of the eight schools model. Provided as an example of a
23 model that NUTS has trouble fitting. Compare to `load_arviz_data("non_centered_eight")`.
24
25 The eight schools model is a hierarchical model used for an analysis of the effectiveness
26 of classes that were designed to improve students’ performance on the Scholastic Aptitude Test.
27
28 See Bayesian Data Analysis (Gelman et. al.) for more details.
29 """,
30 ),
31 "non_centered_eight": LocalFileMetadata(
32 filename=os.path.join(_DATASET_DIR, "non_centered_eight.nc"),
33 description="""
34 A non-centered parameterization of the eight schools model. This is a hierarchical model
35 where sampling problems may be fixed by a non-centered parametrization. Compare to
36 `load_arviz_data("centered_eight")`.
37
38 The eight schools model is a hierarchical model used for an analysis of the effectiveness
39 of classes that were designed to improve students’ performance on the Scholastic Aptitude Test.
40
41 See Bayesian Data Analysis (Gelman et. al.) for more details.
42 """,
43 ),
44 }
45
46 REMOTE_DATASETS = {
47 "radon": RemoteFileMetadata(
48 filename="radon.nc",
49 url="https://ndownloader.figshare.com/files/13284311",
50 checksum="ee9d4644e498d45ab5163982fc74baf05efce5cfa87b11f8509f7b9acf471f09",
51 description="""
52 Radon is a radioactive gas that enters homes through contact points with the ground.
53 It is a carcinogen that is the primary cause of lung cancer in non-smokers. Radon
54 levels vary greatly from household to household.
55
56 This example uses an EPA study of radon levels in houses in Minnesota to construct a
57 model with a hierarchy over households within a county. The model includes estimates
58 (gamma) for contextual effects of the uranium per household.
59
60 See Gelman and Hill (2006) for details on the example, or
61 https://docs.pymc.io/notebooks/multilevel_modeling.html#Correlations-among-levels
62 by Chris Fonnesbeck for details on this implementation.
63 """,
64 ),
65 "rugby": RemoteFileMetadata(
66 filename="rugby.nc",
67 url="https://ndownloader.figshare.com/files/16254359",
68 checksum="9eecd2c6317e45b0388dd97ae6326adecf94128b5a7d15a52c9fcfac0937e2a6",
69 description="""
70 The Six Nations Championship is a yearly rugby competition between Italy, Ireland,
71 Scotland, England, France and Wales. Fifteen games are played each year, representing
72 all combinations of the six teams.
73
74 This example uses and includes results from 2014 - 2017, comprising 60 total
75 games. It models latent parameters for each team's attack and defense, as well
76 as a parameter for home team advantage.
77
78 See https://docs.pymc.io/notebooks/rugby_analytics.html by Peader Coyle
79 for more details and references.
80 """,
81 ),
82 }
83
84
85 def get_data_home(data_home=None):
86 """Return the path of the arviz data dir.
87
88 This folder is used by some dataset loaders to avoid downloading the
89 data several times.
90
91 By default the data dir is set to a folder named 'arviz_data' in the
92 user home folder.
93
94 Alternatively, it can be set by the 'ARVIZ_DATA' environment
95 variable or programmatically by giving an explicit folder path. The '~'
96 symbol is expanded to the user home folder.
97
98 If the folder does not already exist, it is automatically created.
99
100 Parameters
101 ----------
102 data_home : str | None
103 The path to arviz data dir.
104 """
105 if data_home is None:
106 data_home = os.environ.get("ARVIZ_DATA", os.path.join("~", "arviz_data"))
107 data_home = os.path.expanduser(data_home)
108 if not os.path.exists(data_home):
109 os.makedirs(data_home)
110 return data_home
111
112
113 def clear_data_home(data_home=None):
114 """Delete all the content of the data home cache.
115
116 Parameters
117 ----------
118 data_home : str | None
119 The path to arviz data dir.
120 """
121 data_home = get_data_home(data_home)
122 shutil.rmtree(data_home)
123
124
125 def _sha256(path):
126 """Calculate the sha256 hash of the file at path."""
127 sha256hash = hashlib.sha256()
128 chunk_size = 8192
129 with open(path, "rb") as buff:
130 while True:
131 buffer = buff.read(chunk_size)
132 if not buffer:
133 break
134 sha256hash.update(buffer)
135 return sha256hash.hexdigest()
136
137
138 def load_arviz_data(dataset=None, data_home=None):
139 """Load a local or remote pre-made dataset.
140
141 Run with no parameters to get a list of all available models.
142
143 The directory to save to can also be set with the environement
144 variable `ARVIZ_HOME`. The checksum of the dataset is checked against a
145 hardcoded value to watch for data corruption.
146
147 Run `az.clear_data_home` to clear the data directory.
148
149 Parameters
150 ----------
151 dataset : str
152 Name of dataset to load.
153
154 data_home : str, optional
155 Where to save remote datasets
156
157 Returns
158 -------
159 xarray.Dataset
160 """
161 if dataset in LOCAL_DATASETS:
162 resource = LOCAL_DATASETS[dataset]
163 return from_netcdf(resource.filename)
164
165 elif dataset in REMOTE_DATASETS:
166 remote = REMOTE_DATASETS[dataset]
167 home_dir = get_data_home(data_home=data_home)
168 file_path = os.path.join(home_dir, remote.filename)
169 if not os.path.exists(file_path):
170 urlretrieve(remote.url, file_path)
171 checksum = _sha256(file_path)
172 if remote.checksum != checksum:
173 raise IOError(
174 "{} has an SHA256 checksum ({}) differing from expected ({}), "
175 "file may be corrupted. Run `arviz.clear_data_home()` and try "
176 "again, or please open an issue.".format(file_path, checksum, remote.checksum)
177 )
178 return from_netcdf(file_path)
179 else:
180 if dataset is None:
181 return dict(itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()))
182 else:
183 raise ValueError(
184 "Dataset {} not found! The following are available:\n{}".format(
185 dataset, list_datasets()
186 )
187 )
188
189
190 def list_datasets():
191 """Get a string representation of all available datasets with descriptions."""
192 lines = []
193 for name, resource in itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()):
194
195 if isinstance(resource, LocalFileMetadata):
196 location = "local: {}".format(resource.filename)
197 elif isinstance(resource, RemoteFileMetadata):
198 location = "remote: {}".format(resource.url)
199 else:
200 location = "unknown"
201 lines.append("{}\n{}\n{}\n{}".format(name, "=" * len(name), resource.description, location))
202
203 return "\n\n{}\n\n".format(10 * "-").join(lines)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/data/datasets.py b/arviz/data/datasets.py
--- a/arviz/data/datasets.py
+++ b/arviz/data/datasets.py
@@ -77,6 +77,53 @@
See https://docs.pymc.io/notebooks/rugby_analytics.html by Peader Coyle
for more details and references.
+""",
+ ),
+ "regression1d": RemoteFileMetadata(
+ filename="regression1d.nc",
+ url="https://ndownloader.figshare.com/files/16254899",
+ checksum="909e8ffe344e196dad2730b1542881ab5729cb0977dd20ba645a532ffa427278",
+ description="""
+A synthetic one dimensional linear regression dataset with latent slope,
+intercept, and noise ("eps"). One hundred data points, fit with PyMC3.
+
+True slope and intercept are included as deterministic variables.
+""",
+ ),
+ "regression10d": RemoteFileMetadata(
+ filename="regression10d.nc",
+ url="https://ndownloader.figshare.com/files/16255736",
+ checksum="c6716ec7e19926ad2a52d6ae4c1d1dd5ddb747e204c0d811757c8e93fcf9f970",
+ description="""
+A synthetic multi-dimensional (10 dimensions) linear regression dataset with
+latent weights ("w"), intercept, and noise ("eps"). Five hundred data points,
+fit with PyMC3.
+
+True weights and intercept are included as deterministic variables.
+""",
+ ),
+ "classification1d": RemoteFileMetadata(
+ filename="classification1d.nc",
+ url="https://ndownloader.figshare.com/files/16256678",
+ checksum="1cf3806e72c14001f6864bb69d89747dcc09dd55bcbca50aba04e9939daee5a0",
+ description="""
+A synthetic one dimensional logistic regression dataset with latent slope and
+intercept, passed into a Bernoulli random variable. One hundred data points,
+fit with PyMC3.
+
+True slope and intercept are included as deterministic variables.
+""",
+ ),
+ "classification10d": RemoteFileMetadata(
+ filename="classification10d.nc",
+ url="https://ndownloader.figshare.com/files/16256681",
+ checksum="16c9a45e1e6e0519d573cafc4d266d761ba347e62b6f6a79030aaa8e2fde1367",
+ description="""
+A synthetic multi dimensional (10 dimensions) logistic regression dataset with
+latent weights ("w") and intercept, passed into a Bernoulli random variable.
+Five hundred data points, fit with PyMC3.
+
+True weights and intercept are included as deterministic variables.
""",
),
}
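
As a usage note (not part of the patch itself), loading one of the newly registered remote datasets goes through the same `load_arviz_data` flow shown in the file above; the dataset name below comes from the diff.

```python
import arviz as az

# The first call downloads the file into ~/arviz_data (or $ARVIZ_DATA)
# and verifies its SHA256 checksum; later calls reuse the cached copy.
idata = az.load_arviz_data("regression1d")
print(idata)
```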
| {"golden_diff": "diff --git a/arviz/data/datasets.py b/arviz/data/datasets.py\n--- a/arviz/data/datasets.py\n+++ b/arviz/data/datasets.py\n@@ -77,6 +77,53 @@\n \n See https://docs.pymc.io/notebooks/rugby_analytics.html by Peader Coyle\n for more details and references.\n+\"\"\",\n+ ),\n+ \"regression1d\": RemoteFileMetadata(\n+ filename=\"regression1d.nc\",\n+ url=\"https://ndownloader.figshare.com/files/16254899\",\n+ checksum=\"909e8ffe344e196dad2730b1542881ab5729cb0977dd20ba645a532ffa427278\",\n+ description=\"\"\"\n+A synthetic one dimensional linear regression dataset with latent slope,\n+intercept, and noise (\"eps\"). One hundred data points, fit with PyMC3.\n+\n+True slope and intercept are included as deterministic variables.\n+\"\"\",\n+ ),\n+ \"regression10d\": RemoteFileMetadata(\n+ filename=\"regression10d.nc\",\n+ url=\"https://ndownloader.figshare.com/files/16255736\",\n+ checksum=\"c6716ec7e19926ad2a52d6ae4c1d1dd5ddb747e204c0d811757c8e93fcf9f970\",\n+ description=\"\"\"\n+A synthetic multi-dimensional (10 dimensions) linear regression dataset with\n+latent weights (\"w\"), intercept, and noise (\"eps\"). Five hundred data points,\n+fit with PyMC3.\n+\n+True weights and intercept are included as deterministic variables.\n+\"\"\",\n+ ),\n+ \"classification1d\": RemoteFileMetadata(\n+ filename=\"classification1d.nc\",\n+ url=\"https://ndownloader.figshare.com/files/16256678\",\n+ checksum=\"1cf3806e72c14001f6864bb69d89747dcc09dd55bcbca50aba04e9939daee5a0\",\n+ description=\"\"\"\n+A synthetic one dimensional logistic regression dataset with latent slope and\n+intercept, passed into a Bernoulli random variable. One hundred data points,\n+fit with PyMC3.\n+\n+True slope and intercept are included as deterministic variables.\n+\"\"\",\n+ ),\n+ \"classification10d\": RemoteFileMetadata(\n+ filename=\"classification10d.nc\",\n+ url=\"https://ndownloader.figshare.com/files/16256681\",\n+ checksum=\"16c9a45e1e6e0519d573cafc4d266d761ba347e62b6f6a79030aaa8e2fde1367\",\n+ description=\"\"\"\n+A synthetic multi dimensional (10 dimensions) logistic regression dataset with\n+latent weights (\"w\") and intercept, passed into a Bernoulli random variable.\n+Five hundred data points, fit with PyMC3.\n+\n+True weights and intercept are included as deterministic variables.\n \"\"\",\n ),\n }\n", "issue": "Add more fitted models\nfigshare (https://figshare.com/) seems like a possibility that I think scikit-learn uses. In terms of size, [the radon example here](https://docs.pymc.io/notebooks/multilevel_modeling.html) generates a netCDF file of about 15MB, which is not so bad.\r\n\r\nPerhaps it could even be hosted in a separate github repo?\n", "before_files": [{"content": "\"\"\"Base IO code for all datasets. Heavily influenced by scikit-learn's implementation.\"\"\"\nfrom collections import namedtuple\nimport hashlib\nimport itertools\nimport os\nimport shutil\nfrom urllib.request import urlretrieve\n\nfrom .io_netcdf import from_netcdf\n\nLocalFileMetadata = namedtuple(\"LocalFileMetadata\", [\"filename\", \"description\"])\n\nRemoteFileMetadata = namedtuple(\n \"RemoteFileMetadata\", [\"filename\", \"url\", \"checksum\", \"description\"]\n)\n_DATASET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"_datasets\")\n\nLOCAL_DATASETS = {\n \"centered_eight\": LocalFileMetadata(\n filename=os.path.join(_DATASET_DIR, \"centered_eight.nc\"),\n description=\"\"\"\nA centered parameterization of the eight schools model. Provided as an example of a\nmodel that NUTS has trouble fitting. 
Compare to `load_arviz_data(\"non_centered_eight\")`.\n\nThe eight schools model is a hierarchical model used for an analysis of the effectiveness\nof classes that were designed to improve students\u2019 performance on the Scholastic Aptitude Test.\n\nSee Bayesian Data Analysis (Gelman et. al.) for more details.\n\"\"\",\n ),\n \"non_centered_eight\": LocalFileMetadata(\n filename=os.path.join(_DATASET_DIR, \"non_centered_eight.nc\"),\n description=\"\"\"\nA non-centered parameterization of the eight schools model. This is a hierarchical model\nwhere sampling problems may be fixed by a non-centered parametrization. Compare to\n`load_arviz_data(\"centered_eight\")`.\n\nThe eight schools model is a hierarchical model used for an analysis of the effectiveness\nof classes that were designed to improve students\u2019 performance on the Scholastic Aptitude Test.\n\nSee Bayesian Data Analysis (Gelman et. al.) for more details.\n\"\"\",\n ),\n}\n\nREMOTE_DATASETS = {\n \"radon\": RemoteFileMetadata(\n filename=\"radon.nc\",\n url=\"https://ndownloader.figshare.com/files/13284311\",\n checksum=\"ee9d4644e498d45ab5163982fc74baf05efce5cfa87b11f8509f7b9acf471f09\",\n description=\"\"\"\nRadon is a radioactive gas that enters homes through contact points with the ground.\nIt is a carcinogen that is the primary cause of lung cancer in non-smokers. Radon\nlevels vary greatly from household to household.\n\nThis example uses an EPA study of radon levels in houses in Minnesota to construct a\nmodel with a hierarchy over households within a county. The model includes estimates\n(gamma) for contextual effects of the uranium per household.\n\nSee Gelman and Hill (2006) for details on the example, or\nhttps://docs.pymc.io/notebooks/multilevel_modeling.html#Correlations-among-levels\nby Chris Fonnesbeck for details on this implementation.\n\"\"\",\n ),\n \"rugby\": RemoteFileMetadata(\n filename=\"rugby.nc\",\n url=\"https://ndownloader.figshare.com/files/16254359\",\n checksum=\"9eecd2c6317e45b0388dd97ae6326adecf94128b5a7d15a52c9fcfac0937e2a6\",\n description=\"\"\"\nThe Six Nations Championship is a yearly rugby competition between Italy, Ireland,\nScotland, England, France and Wales. Fifteen games are played each year, representing\nall combinations of the six teams.\n\nThis example uses and includes results from 2014 - 2017, comprising 60 total\ngames. It models latent parameters for each team's attack and defense, as well\nas a parameter for home team advantage.\n\nSee https://docs.pymc.io/notebooks/rugby_analytics.html by Peader Coyle\nfor more details and references.\n\"\"\",\n ),\n}\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the arviz data dir.\n\n This folder is used by some dataset loaders to avoid downloading the\n data several times.\n\n By default the data dir is set to a folder named 'arviz_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'ARVIZ_DATA' environment\n variable or programmatically by giving an explicit folder path. 
The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str | None\n The path to arviz data dir.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"ARVIZ_DATA\", os.path.join(\"~\", \"arviz_data\"))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\n\n Parameters\n ----------\n data_home : str | None\n The path to arviz data dir.\n \"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\n\ndef _sha256(path):\n \"\"\"Calculate the sha256 hash of the file at path.\"\"\"\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(path, \"rb\") as buff:\n while True:\n buffer = buff.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()\n\n\ndef load_arviz_data(dataset=None, data_home=None):\n \"\"\"Load a local or remote pre-made dataset.\n\n Run with no parameters to get a list of all available models.\n\n The directory to save to can also be set with the environement\n variable `ARVIZ_HOME`. The checksum of the dataset is checked against a\n hardcoded value to watch for data corruption.\n\n Run `az.clear_data_home` to clear the data directory.\n\n Parameters\n ----------\n dataset : str\n Name of dataset to load.\n\n data_home : str, optional\n Where to save remote datasets\n\n Returns\n -------\n xarray.Dataset\n \"\"\"\n if dataset in LOCAL_DATASETS:\n resource = LOCAL_DATASETS[dataset]\n return from_netcdf(resource.filename)\n\n elif dataset in REMOTE_DATASETS:\n remote = REMOTE_DATASETS[dataset]\n home_dir = get_data_home(data_home=data_home)\n file_path = os.path.join(home_dir, remote.filename)\n if not os.path.exists(file_path):\n urlretrieve(remote.url, file_path)\n checksum = _sha256(file_path)\n if remote.checksum != checksum:\n raise IOError(\n \"{} has an SHA256 checksum ({}) differing from expected ({}), \"\n \"file may be corrupted. Run `arviz.clear_data_home()` and try \"\n \"again, or please open an issue.\".format(file_path, checksum, remote.checksum)\n )\n return from_netcdf(file_path)\n else:\n if dataset is None:\n return dict(itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()))\n else:\n raise ValueError(\n \"Dataset {} not found! The following are available:\\n{}\".format(\n dataset, list_datasets()\n )\n )\n\n\ndef list_datasets():\n \"\"\"Get a string representation of all available datasets with descriptions.\"\"\"\n lines = []\n for name, resource in itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()):\n\n if isinstance(resource, LocalFileMetadata):\n location = \"local: {}\".format(resource.filename)\n elif isinstance(resource, RemoteFileMetadata):\n location = \"remote: {}\".format(resource.url)\n else:\n location = \"unknown\"\n lines.append(\"{}\\n{}\\n{}\\n{}\".format(name, \"=\" * len(name), resource.description, location))\n\n return \"\\n\\n{}\\n\\n\".format(10 * \"-\").join(lines)\n", "path": "arviz/data/datasets.py"}], "after_files": [{"content": "\"\"\"Base IO code for all datasets. 
Heavily influenced by scikit-learn's implementation.\"\"\"\nfrom collections import namedtuple\nimport hashlib\nimport itertools\nimport os\nimport shutil\nfrom urllib.request import urlretrieve\n\nfrom .io_netcdf import from_netcdf\n\nLocalFileMetadata = namedtuple(\"LocalFileMetadata\", [\"filename\", \"description\"])\n\nRemoteFileMetadata = namedtuple(\n \"RemoteFileMetadata\", [\"filename\", \"url\", \"checksum\", \"description\"]\n)\n_DATASET_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"_datasets\")\n\nLOCAL_DATASETS = {\n \"centered_eight\": LocalFileMetadata(\n filename=os.path.join(_DATASET_DIR, \"centered_eight.nc\"),\n description=\"\"\"\nA centered parameterization of the eight schools model. Provided as an example of a\nmodel that NUTS has trouble fitting. Compare to `load_arviz_data(\"non_centered_eight\")`.\n\nThe eight schools model is a hierarchical model used for an analysis of the effectiveness\nof classes that were designed to improve students\u2019 performance on the Scholastic Aptitude Test.\n\nSee Bayesian Data Analysis (Gelman et. al.) for more details.\n\"\"\",\n ),\n \"non_centered_eight\": LocalFileMetadata(\n filename=os.path.join(_DATASET_DIR, \"non_centered_eight.nc\"),\n description=\"\"\"\nA non-centered parameterization of the eight schools model. This is a hierarchical model\nwhere sampling problems may be fixed by a non-centered parametrization. Compare to\n`load_arviz_data(\"centered_eight\")`.\n\nThe eight schools model is a hierarchical model used for an analysis of the effectiveness\nof classes that were designed to improve students\u2019 performance on the Scholastic Aptitude Test.\n\nSee Bayesian Data Analysis (Gelman et. al.) for more details.\n\"\"\",\n ),\n}\n\nREMOTE_DATASETS = {\n \"radon\": RemoteFileMetadata(\n filename=\"radon.nc\",\n url=\"https://ndownloader.figshare.com/files/13284311\",\n checksum=\"ee9d4644e498d45ab5163982fc74baf05efce5cfa87b11f8509f7b9acf471f09\",\n description=\"\"\"\nRadon is a radioactive gas that enters homes through contact points with the ground.\nIt is a carcinogen that is the primary cause of lung cancer in non-smokers. Radon\nlevels vary greatly from household to household.\n\nThis example uses an EPA study of radon levels in houses in Minnesota to construct a\nmodel with a hierarchy over households within a county. The model includes estimates\n(gamma) for contextual effects of the uranium per household.\n\nSee Gelman and Hill (2006) for details on the example, or\nhttps://docs.pymc.io/notebooks/multilevel_modeling.html#Correlations-among-levels\nby Chris Fonnesbeck for details on this implementation.\n\"\"\",\n ),\n \"rugby\": RemoteFileMetadata(\n filename=\"rugby.nc\",\n url=\"https://ndownloader.figshare.com/files/16254359\",\n checksum=\"9eecd2c6317e45b0388dd97ae6326adecf94128b5a7d15a52c9fcfac0937e2a6\",\n description=\"\"\"\nThe Six Nations Championship is a yearly rugby competition between Italy, Ireland,\nScotland, England, France and Wales. Fifteen games are played each year, representing\nall combinations of the six teams.\n\nThis example uses and includes results from 2014 - 2017, comprising 60 total\ngames. 
It models latent parameters for each team's attack and defense, as well\nas a parameter for home team advantage.\n\nSee https://docs.pymc.io/notebooks/rugby_analytics.html by Peader Coyle\nfor more details and references.\n\"\"\",\n ),\n \"regression1d\": RemoteFileMetadata(\n filename=\"regression1d.nc\",\n url=\"https://ndownloader.figshare.com/files/16254899\",\n checksum=\"909e8ffe344e196dad2730b1542881ab5729cb0977dd20ba645a532ffa427278\",\n description=\"\"\"\nA synthetic one dimensional linear regression dataset with latent slope,\nintercept, and noise (\"eps\"). One hundred data points, fit with PyMC3.\n\nTrue slope and intercept are included as deterministic variables.\n\"\"\",\n ),\n \"regression10d\": RemoteFileMetadata(\n filename=\"regression10d.nc\",\n url=\"https://ndownloader.figshare.com/files/16255736\",\n checksum=\"c6716ec7e19926ad2a52d6ae4c1d1dd5ddb747e204c0d811757c8e93fcf9f970\",\n description=\"\"\"\nA synthetic multi-dimensional (10 dimensions) linear regression dataset with\nlatent weights (\"w\"), intercept, and noise (\"eps\"). Five hundred data points,\nfit with PyMC3.\n\nTrue weights and intercept are included as deterministic variables.\n\"\"\",\n ),\n \"classification1d\": RemoteFileMetadata(\n filename=\"classification1d.nc\",\n url=\"https://ndownloader.figshare.com/files/16256678\",\n checksum=\"1cf3806e72c14001f6864bb69d89747dcc09dd55bcbca50aba04e9939daee5a0\",\n description=\"\"\"\nA synthetic one dimensional logistic regression dataset with latent slope and\nintercept, passed into a Bernoulli random variable. One hundred data points,\nfit with PyMC3.\n\nTrue slope and intercept are included as deterministic variables.\n\"\"\",\n ),\n \"classification10d\": RemoteFileMetadata(\n filename=\"classification10d.nc\",\n url=\"https://ndownloader.figshare.com/files/16256681\",\n checksum=\"16c9a45e1e6e0519d573cafc4d266d761ba347e62b6f6a79030aaa8e2fde1367\",\n description=\"\"\"\nA synthetic multi dimensional (10 dimensions) logistic regression dataset with\nlatent weights (\"w\") and intercept, passed into a Bernoulli random variable.\nFive hundred data points, fit with PyMC3.\n\nTrue weights and intercept are included as deterministic variables.\n\"\"\",\n ),\n}\n\n\ndef get_data_home(data_home=None):\n \"\"\"Return the path of the arviz data dir.\n\n This folder is used by some dataset loaders to avoid downloading the\n data several times.\n\n By default the data dir is set to a folder named 'arviz_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'ARVIZ_DATA' environment\n variable or programmatically by giving an explicit folder path. 
The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str | None\n The path to arviz data dir.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"ARVIZ_DATA\", os.path.join(\"~\", \"arviz_data\"))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\ndef clear_data_home(data_home=None):\n \"\"\"Delete all the content of the data home cache.\n\n Parameters\n ----------\n data_home : str | None\n The path to arviz data dir.\n \"\"\"\n data_home = get_data_home(data_home)\n shutil.rmtree(data_home)\n\n\ndef _sha256(path):\n \"\"\"Calculate the sha256 hash of the file at path.\"\"\"\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(path, \"rb\") as buff:\n while True:\n buffer = buff.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()\n\n\ndef load_arviz_data(dataset=None, data_home=None):\n \"\"\"Load a local or remote pre-made dataset.\n\n Run with no parameters to get a list of all available models.\n\n The directory to save to can also be set with the environement\n variable `ARVIZ_HOME`. The checksum of the dataset is checked against a\n hardcoded value to watch for data corruption.\n\n Run `az.clear_data_home` to clear the data directory.\n\n Parameters\n ----------\n dataset : str\n Name of dataset to load.\n\n data_home : str, optional\n Where to save remote datasets\n\n Returns\n -------\n xarray.Dataset\n \"\"\"\n if dataset in LOCAL_DATASETS:\n resource = LOCAL_DATASETS[dataset]\n return from_netcdf(resource.filename)\n\n elif dataset in REMOTE_DATASETS:\n remote = REMOTE_DATASETS[dataset]\n home_dir = get_data_home(data_home=data_home)\n file_path = os.path.join(home_dir, remote.filename)\n if not os.path.exists(file_path):\n urlretrieve(remote.url, file_path)\n checksum = _sha256(file_path)\n if remote.checksum != checksum:\n raise IOError(\n \"{} has an SHA256 checksum ({}) differing from expected ({}), \"\n \"file may be corrupted. Run `arviz.clear_data_home()` and try \"\n \"again, or please open an issue.\".format(file_path, checksum, remote.checksum)\n )\n return from_netcdf(file_path)\n else:\n if dataset is None:\n return dict(itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()))\n else:\n raise ValueError(\n \"Dataset {} not found! The following are available:\\n{}\".format(\n dataset, list_datasets()\n )\n )\n\n\ndef list_datasets():\n \"\"\"Get a string representation of all available datasets with descriptions.\"\"\"\n lines = []\n for name, resource in itertools.chain(LOCAL_DATASETS.items(), REMOTE_DATASETS.items()):\n\n if isinstance(resource, LocalFileMetadata):\n location = \"local: {}\".format(resource.filename)\n elif isinstance(resource, RemoteFileMetadata):\n location = \"remote: {}\".format(resource.url)\n else:\n location = \"unknown\"\n lines.append(\"{}\\n{}\\n{}\\n{}\".format(name, \"=\" * len(name), resource.description, location))\n\n return \"\\n\\n{}\\n\\n\".format(10 * \"-\").join(lines)\n", "path": "arviz/data/datasets.py"}]} | 2,633 | 745 |
gh_patches_debug_9203 | rasdani/github-patches | git_diff | Qiskit__qiskit-4081 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve an error message in qiskit.converters.circuit_to_gate()
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
Let's assume we have a `QuantumCircuit` object called `qc`, and one tries to convert it into a `Gate` object using `qiskit.converters.circuit_to_gate()`. If `qc` contains instructions which cannot be converted into a `Gate`, the following exception is raised:
```
QiskitError: 'One or more instructions in this instruction cannot be converted to a gate'
```
My suggestion is to improve this error message and add some info about the particular instruction preventing the conversion from happening. I believe something like the instruction name in the error message would be more helpful than the current general statement.
Below is a code snippet (for a `qc` containing a measurement operation) which can be used to reproduce the error mentioned above
```
from qiskit import QuantumCircuit
from qiskit.converters import circuit_to_gate
qc = QuantumCircuit(1, 1)
qc.h(0)
qc.measure(0, 0)
gate = circuit_to_gate(qc)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/converters/circuit_to_gate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Helper function for converting a circuit to a gate"""
16
17 from qiskit.circuit.gate import Gate
18 from qiskit.circuit.quantumregister import QuantumRegister, Qubit
19 from qiskit.exceptions import QiskitError
20
21
22 def circuit_to_gate(circuit, parameter_map=None):
23 """Build a ``Gate`` object from a ``QuantumCircuit``.
24
25 The gate is anonymous (not tied to a named quantum register),
26 and so can be inserted into another circuit. The gate will
27 have the same string name as the circuit.
28
29 Args:
30 circuit (QuantumCircuit): the input circuit.
31 parameter_map (dict): For parameterized circuits, a mapping from
32 parameters in the circuit to parameters to be used in the gate.
33 If None, existing circuit parameters will also parameterize the
34 Gate.
35
36 Raises:
37 QiskitError: if circuit is non-unitary or if
38 parameter_map is not compatible with circuit
39
40 Return:
41 Gate: a Gate equivalent to the action of the
42 input circuit. Upon decomposition, this gate will
43 yield the components comprising the original circuit.
44 """
45 if circuit.clbits:
46 raise QiskitError('Circuit with classical bits cannot be converted '
47 'to gate.')
48
49 for inst, _, _ in circuit.data:
50 if not isinstance(inst, Gate):
51 raise QiskitError('One or more instructions in this instruction '
52 'cannot be converted to a gate')
53
54 if parameter_map is None:
55 parameter_dict = {p: p for p in circuit.parameters}
56 else:
57 parameter_dict = circuit._unroll_param_dict(parameter_map)
58
59 if parameter_dict.keys() != circuit.parameters:
60 raise QiskitError(('parameter_map should map all circuit parameters. '
61 'Circuit parameters: {}, parameter_map: {}').format(
62 circuit.parameters, parameter_dict))
63
64 gate = Gate(name=circuit.name,
65 num_qubits=sum([qreg.size for qreg in circuit.qregs]),
66 params=sorted(parameter_dict.values(), key=lambda p: p.name))
67 gate.condition = None
68
69 def find_bit_position(bit):
70 """find the index of a given bit (Register, int) within
71 a flat ordered list of bits of the circuit
72 """
73 if isinstance(bit, Qubit):
74 ordered_regs = circuit.qregs
75 else:
76 ordered_regs = circuit.cregs
77 reg_index = ordered_regs.index(bit.register)
78 return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index
79
80 target = circuit.copy()
81 target._substitute_parameters(parameter_dict)
82
83 # pylint: disable=cyclic-import
84 from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel
85 # pylint: enable=cyclic-import
86 sel.add_equivalence(gate, target)
87
88 definition = target.data
89
90 if gate.num_qubits > 0:
91 q = QuantumRegister(gate.num_qubits, 'q')
92
93 # The 3rd parameter in the output tuple) is hard coded to [] because
94 # Gate objects do not have cregs set and we've verified that all
95 # instructions are gates
96 definition = list(map(
97 lambda x: (x[0],
98 list(map(lambda y: q[find_bit_position(y)], x[1])),
99 []),
100 definition))
101 gate.definition = definition
102
103 return gate
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py
--- a/qiskit/converters/circuit_to_gate.py
+++ b/qiskit/converters/circuit_to_gate.py
@@ -48,8 +48,9 @@
for inst, _, _ in circuit.data:
if not isinstance(inst, Gate):
- raise QiskitError('One or more instructions in this instruction '
- 'cannot be converted to a gate')
+ raise QiskitError(('One or more instructions cannot be converted to'
+ ' a gate. "{}" is not a gate instruction').format(
+ inst.name))
if parameter_map is None:
parameter_dict = {p: p for p in circuit.parameters}
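
To make the new message concrete, a small sketch that triggers the per-instruction check; it uses `reset` (an `Instruction` that is not a `Gate`) rather than `measure`, because a circuit with classical bits is rejected earlier by the `clbits` check in the file above.

```python
from qiskit import QuantumCircuit
from qiskit.converters import circuit_to_gate
from qiskit.exceptions import QiskitError

qc = QuantumCircuit(1)  # no classical bits, so only the gate check can fire
qc.h(0)
qc.reset(0)             # Reset is an Instruction, not a Gate

try:
    circuit_to_gate(qc)
except QiskitError as err:
    # With the patch, the message names the offending instruction ("reset").
    print(err)
```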
| {"golden_diff": "diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py\n--- a/qiskit/converters/circuit_to_gate.py\n+++ b/qiskit/converters/circuit_to_gate.py\n@@ -48,8 +48,9 @@\n \n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n- raise QiskitError('One or more instructions in this instruction '\n- 'cannot be converted to a gate')\n+ raise QiskitError(('One or more instructions cannot be converted to'\n+ ' a gate. \"{}\" is not a gate instruction').format(\n+ inst.name))\n \n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n", "issue": "Improve an error message in qiskit.converters.circuit_to_gate()\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nLet's assume we have `QuantumCircuit` object called `qc`, and one tries to convert it into a `Gate` object using `qiskit.converters.circuit_to_gate()`. If `qc` contains some instructions which cannot be converted into `Gate`, the following exception is raised\r\n```\r\nQiskitError: 'One or more instructions in this instruction cannot be converted to a gate'\r\n```\r\nMy suggestion is to improve this error message and add some info about the particular instruction preventing the convertion from happening. I believe, something like the instruction name in the error message should be more helpfull, than the current general statement.\r\n\r\nBelow is a code snippet (for a `qc` containing a measurement operation) which can be used to achieve the error mentioned above\r\n```\r\nfrom qiskit import QuantumCircuit\r\nfrom qiskit.converters import circuit_to_gate\r\n\r\nqc = QuantumCircuit(1, 1)\r\nqc.h(0)\r\nqc.measure(0, 0)\r\n\r\ngate = circuit_to_gate(qc)\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n if circuit.clbits:\n raise QiskitError('Circuit with classical bits cannot be converted '\n 'to gate.')\n\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n # pylint: disable=cyclic-import\n from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel\n # pylint: enable=cyclic-import\n sel.add_equivalence(gate, target)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n # The 3rd parameter in the output tuple) is hard coded to [] because\n # Gate objects do not have cregs set and we've verified that all\n # instructions are gates\n definition = list(map(\n lambda x: (x[0],\n list(map(lambda y: q[find_bit_position(y)], x[1])),\n []),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n if circuit.clbits:\n raise QiskitError('Circuit with classical bits cannot be converted '\n 'to gate.')\n\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError(('One or more instructions cannot be converted to'\n ' a gate. \"{}\" is not a gate instruction').format(\n inst.name))\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n # pylint: disable=cyclic-import\n from qiskit.circuit.equivalence_library import SessionEquivalenceLibrary as sel\n # pylint: enable=cyclic-import\n sel.add_equivalence(gate, target)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n # The 3rd parameter in the output tuple) is hard coded to [] because\n # Gate objects do not have cregs set and we've verified that all\n # instructions are gates\n definition = list(map(\n lambda x: (x[0],\n list(map(lambda y: q[find_bit_position(y)], x[1])),\n []),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}]} | 1,613 | 173 |
gh_patches_debug_984 | rasdani/github-patches | git_diff | Mailu__Mailu-2157 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin User Quota sorting is off
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [ x] Check if no issue or pull-request for this already exists.
- [ x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [ x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [ x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [ x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
When sorting by quota in the Admin interface, the values are sorted as text instead of numerically by byte size.
## Expected behaviour
kB is smaller than MB is smaller than GB

--- END ISSUE ---
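Editor's note: the short sketch below is not part of the issue or the Mailu code; it only illustrates the reported behaviour, namely that human-readable quota strings compare as text unless the sort key is the underlying byte count. The sample values are made up.
```python
# Illustration only: formatted sizes sort lexicographically, not by magnitude.
quotas = ["10 MB", "2 GB", "900 kB"]

print(sorted(quotas))  # ['10 MB', '2 GB', '900 kB'] -- text order, wrong

UNITS = {"kB": 10**3, "MB": 10**6, "GB": 10**9}

def to_bytes(value: str) -> float:
    number, unit = value.split()
    return float(number) * UNITS[unit]

print(sorted(quotas, key=to_bytes))  # ['900 kB', '10 MB', '2 GB'] -- by size, expected
```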
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/__init__.py`
Content:
```
1 """ Mailu admin app
2 """
3
4 import flask
5 import flask_bootstrap
6
7 from mailu import utils, debug, models, manage, configuration
8
9 import hmac
10
11 def create_app_from_config(config):
12 """ Create a new application based on the given configuration
13 """
14 app = flask.Flask(__name__, static_folder='static', static_url_path='/static')
15 app.cli.add_command(manage.mailu)
16
17 # Bootstrap is used for error display and flash messages
18 app.bootstrap = flask_bootstrap.Bootstrap(app)
19
20 # Initialize application extensions
21 config.init_app(app)
22 models.db.init_app(app)
23 utils.session.init_app(app)
24 utils.limiter.init_app(app)
25 utils.babel.init_app(app)
26 utils.login.init_app(app)
27 utils.login.user_loader(models.User.get)
28 utils.proxy.init_app(app)
29 utils.migrate.init_app(app, models.db)
30
31 app.device_cookie_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('DEVICE_COOKIE_KEY', 'utf-8'), 'sha256').digest()
32 app.temp_token_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('WEBMAIL_TEMP_TOKEN_KEY', 'utf-8'), 'sha256').digest()
33 app.srs_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('SRS_KEY', 'utf-8'), 'sha256').digest()
34
35 # Initialize list of translations
36 app.config.translations = {
37 str(locale): locale
38 for locale in sorted(
39 utils.babel.list_translations(),
40 key=lambda l: l.get_language_name().title()
41 )
42 }
43
44 # Initialize debugging tools
45 if app.config.get("DEBUG"):
46 debug.toolbar.init_app(app)
47 if app.config.get("DEBUG_PROFILER"):
48 debug.profiler.init_app(app)
49 if assets := app.config.get('DEBUG_ASSETS'):
50 app.static_folder = assets
51
52 # Inject the default variables in the Jinja parser
53 # TODO: move this to blueprints when needed
54 @app.context_processor
55 def inject_defaults():
56 signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()
57 return dict(
58 signup_domains= signup_domains,
59 config = app.config,
60 )
61
62 # Jinja filters
63 @app.template_filter()
64 def format_date(value):
65 return utils.flask_babel.format_date(value) if value else ''
66
67 @app.template_filter()
68 def format_datetime(value):
69 return utils.flask_babel.format_datetime(value) if value else ''
70
71 # Import views
72 from mailu import ui, internal, sso
73 app.register_blueprint(ui.ui, url_prefix=app.config['WEB_ADMIN'])
74 app.register_blueprint(internal.internal, url_prefix='/internal')
75 app.register_blueprint(sso.sso, url_prefix='/sso')
76 return app
77
78
79 def create_app():
80 """ Create a new application based on the config module
81 """
82 config = configuration.ConfigManager()
83 return create_app_from_config(config)
84
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/mailu/__init__.py b/core/admin/mailu/__init__.py
--- a/core/admin/mailu/__init__.py
+++ b/core/admin/mailu/__init__.py
@@ -57,6 +57,7 @@
return dict(
signup_domains= signup_domains,
config = app.config,
+ get_locale = utils.get_locale,
)
# Jinja filters
| {"golden_diff": "diff --git a/core/admin/mailu/__init__.py b/core/admin/mailu/__init__.py\n--- a/core/admin/mailu/__init__.py\n+++ b/core/admin/mailu/__init__.py\n@@ -57,6 +57,7 @@\n return dict(\n signup_domains= signup_domains,\n config = app.config,\n+ get_locale = utils.get_locale,\n )\n \n # Jinja filters\n", "issue": "Admin User Quota sorting is off\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\n## Before you open your issue\r\n- [ x] Check if no issue or pull-request for this already exists.\r\n- [ x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [ x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [ x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [ x] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n1.9\r\n\r\n## Description\r\nWhen sorting by quota in the Admin interface the numbers are sorted like text instead of by number and bytes.\r\n\r\n\r\n## Expected behaviour\r\nkB is smaller than MB is smaller than GB\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\" Mailu admin app\n\"\"\"\n\nimport flask\nimport flask_bootstrap\n\nfrom mailu import utils, debug, models, manage, configuration\n\nimport hmac\n\ndef create_app_from_config(config):\n \"\"\" Create a new application based on the given configuration\n \"\"\"\n app = flask.Flask(__name__, static_folder='static', static_url_path='/static')\n app.cli.add_command(manage.mailu)\n\n # Bootstrap is used for error display and flash messages\n app.bootstrap = flask_bootstrap.Bootstrap(app)\n\n # Initialize application extensions\n config.init_app(app)\n models.db.init_app(app)\n utils.session.init_app(app)\n utils.limiter.init_app(app)\n utils.babel.init_app(app)\n utils.login.init_app(app)\n utils.login.user_loader(models.User.get)\n utils.proxy.init_app(app)\n utils.migrate.init_app(app, models.db)\n\n app.device_cookie_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('DEVICE_COOKIE_KEY', 'utf-8'), 'sha256').digest()\n app.temp_token_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('WEBMAIL_TEMP_TOKEN_KEY', 'utf-8'), 'sha256').digest()\n app.srs_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('SRS_KEY', 'utf-8'), 'sha256').digest()\n\n # Initialize list of translations\n app.config.translations = {\n str(locale): locale\n for locale in sorted(\n utils.babel.list_translations(),\n key=lambda l: l.get_language_name().title()\n )\n }\n\n # Initialize debugging tools\n if app.config.get(\"DEBUG\"):\n debug.toolbar.init_app(app)\n if app.config.get(\"DEBUG_PROFILER\"):\n debug.profiler.init_app(app)\n if assets := app.config.get('DEBUG_ASSETS'):\n app.static_folder = assets\n\n # Inject the default variables in the Jinja parser\n # TODO: move this to blueprints when needed\n @app.context_processor\n def inject_defaults():\n signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()\n return dict(\n signup_domains= signup_domains,\n config = 
app.config,\n )\n\n # Jinja filters\n @app.template_filter()\n def format_date(value):\n return utils.flask_babel.format_date(value) if value else ''\n\n @app.template_filter()\n def format_datetime(value):\n return utils.flask_babel.format_datetime(value) if value else ''\n\n # Import views\n from mailu import ui, internal, sso\n app.register_blueprint(ui.ui, url_prefix=app.config['WEB_ADMIN'])\n app.register_blueprint(internal.internal, url_prefix='/internal')\n app.register_blueprint(sso.sso, url_prefix='/sso')\n return app\n\n\ndef create_app():\n \"\"\" Create a new application based on the config module\n \"\"\"\n config = configuration.ConfigManager()\n return create_app_from_config(config)\n\n", "path": "core/admin/mailu/__init__.py"}], "after_files": [{"content": "\"\"\" Mailu admin app\n\"\"\"\n\nimport flask\nimport flask_bootstrap\n\nfrom mailu import utils, debug, models, manage, configuration\n\nimport hmac\n\ndef create_app_from_config(config):\n \"\"\" Create a new application based on the given configuration\n \"\"\"\n app = flask.Flask(__name__, static_folder='static', static_url_path='/static')\n app.cli.add_command(manage.mailu)\n\n # Bootstrap is used for error display and flash messages\n app.bootstrap = flask_bootstrap.Bootstrap(app)\n\n # Initialize application extensions\n config.init_app(app)\n models.db.init_app(app)\n utils.session.init_app(app)\n utils.limiter.init_app(app)\n utils.babel.init_app(app)\n utils.login.init_app(app)\n utils.login.user_loader(models.User.get)\n utils.proxy.init_app(app)\n utils.migrate.init_app(app, models.db)\n\n app.device_cookie_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('DEVICE_COOKIE_KEY', 'utf-8'), 'sha256').digest()\n app.temp_token_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('WEBMAIL_TEMP_TOKEN_KEY', 'utf-8'), 'sha256').digest()\n app.srs_key = hmac.new(bytearray(app.secret_key, 'utf-8'), bytearray('SRS_KEY', 'utf-8'), 'sha256').digest()\n\n # Initialize list of translations\n app.config.translations = {\n str(locale): locale\n for locale in sorted(\n utils.babel.list_translations(),\n key=lambda l: l.get_language_name().title()\n )\n }\n\n # Initialize debugging tools\n if app.config.get(\"DEBUG\"):\n debug.toolbar.init_app(app)\n if app.config.get(\"DEBUG_PROFILER\"):\n debug.profiler.init_app(app)\n if assets := app.config.get('DEBUG_ASSETS'):\n app.static_folder = assets\n\n # Inject the default variables in the Jinja parser\n # TODO: move this to blueprints when needed\n @app.context_processor\n def inject_defaults():\n signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()\n return dict(\n signup_domains= signup_domains,\n config = app.config,\n get_locale = utils.get_locale,\n )\n\n # Jinja filters\n @app.template_filter()\n def format_date(value):\n return utils.flask_babel.format_date(value) if value else ''\n\n @app.template_filter()\n def format_datetime(value):\n return utils.flask_babel.format_datetime(value) if value else ''\n\n # Import views\n from mailu import ui, internal, sso\n app.register_blueprint(ui.ui, url_prefix=app.config['WEB_ADMIN'])\n app.register_blueprint(internal.internal, url_prefix='/internal')\n app.register_blueprint(sso.sso, url_prefix='/sso')\n return app\n\n\ndef create_app():\n \"\"\" Create a new application based on the config module\n \"\"\"\n config = configuration.ConfigManager()\n return create_app_from_config(config)\n\n", "path": "core/admin/mailu/__init__.py"}]} | 1,412 | 93 |
gh_patches_debug_6889 | rasdani/github-patches | git_diff | praw-dev__praw-844 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
comment.refresh() causes praw to crash
## Issue Description
This issue arises when calling refresh on a comment. I have been able to reproduce the bug multiple times by running this simplified script:
#!/usr/bin/python3
import praw
reddit = praw.Reddit('bot1', user_agent='python')
askreddit = reddit.subreddit('askreddit')
for comment in askreddit.stream.comments():
print('REFRESHING:', comment.id, comment.body)
try:
comment.refresh()
except praw.exceptions.ClientException:
pass
This script can run for up to half an hour before it crashes. Sometimes it crashes immediately, and sometimes it will run fine for a while before crashing. I also noticed that it always crashes on the same incriminating comments.
This is an example of a comment that will cause it to crash - dma3mi5. Strangely enough, when following the permalink in my browser I cannot see the comment in question, but rather the other comments in the thread. Perhaps this has something to do with the user being shadowbanned?
c = reddit.comment('dma3mi5')
c.refresh()
ERROR OUTPUT
Traceback (most recent call last):
File "./test.py", line 12, in <module>
comment.refresh()
File "/usr/local/lib/python3.4/dist-packages/praw/models/reddit/comment.py", line 196, in refresh
queue.extend(comment._replies)
AttributeError: 'MoreComments' object has no attribute '_replies'
## System Information
PRAW Version: 5.0.1
Python Version: 3.4
Operating System: Debian
--- END ISSUE ---
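Editor's note: the sketch below is not from the issue report and is not a fix for the library itself; it only shows a defensive, user-side workaround for the traceback above, under the assumption that the `AttributeError` is raised while `refresh()` walks a reply tree containing a `MoreComments` placeholder.
```python
# Hypothetical user-side guard around the crash described above.
import praw
from praw.exceptions import ClientException

reddit = praw.Reddit('bot1', user_agent='python')  # credentials as in the issue script

for comment in reddit.subreddit('askreddit').stream.comments():
    try:
        comment.refresh()
    except ClientException:
        pass  # comment not found in the comment tree (handled in the original script)
    except AttributeError:
        pass  # MoreComments placeholder hit during refresh(), as in the reported traceback
```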
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `praw/models/reddit/comment.py`
Content:
```
1 """Provide the Comment class."""
2 from ...const import urljoin
3 from ...exceptions import ClientException
4 from ..comment_forest import CommentForest
5 from .base import RedditBase
6 from .mixins import InboxableMixin, ThingModerationMixin, UserContentMixin
7 from .redditor import Redditor
8
9
10 class Comment(RedditBase, InboxableMixin, UserContentMixin):
11 """A class that represents a reddit comments."""
12
13 MISSING_COMMENT_MESSAGE = ('This comment does not appear to be in the '
14 'comment tree')
15 STR_FIELD = 'id'
16
17 @property
18 def is_root(self):
19 """Return True when the comment is a top level comment."""
20 parent_type = self.parent_id.split('_', 1)[0]
21 return parent_type == self._reddit.config.kinds['submission']
22
23 @property
24 def mod(self):
25 """Provide an instance of :class:`.CommentModeration`."""
26 if self._mod is None:
27 self._mod = CommentModeration(self)
28 return self._mod
29
30 @property
31 def replies(self):
32 """Provide an instance of :class:`.CommentForest`."""
33 if isinstance(self._replies, list):
34 self._replies = CommentForest(self.submission, self._replies)
35 return self._replies
36
37 @property
38 def submission(self):
39 """Return the Submission object this comment belongs to."""
40 if not self._submission: # Comment not from submission
41 self._submission = self._reddit.submission(
42 self._extract_submission_id())
43 return self._submission
44
45 @submission.setter
46 def submission(self, submission):
47 """Update the Submission associated with the Comment."""
48 submission._comments_by_id[self.name] = self
49 self._submission = submission
50 # pylint: disable=not-an-iterable
51 for reply in getattr(self, 'replies', []):
52 reply.submission = submission
53
54 def __init__(self, reddit, id=None, # pylint: disable=redefined-builtin
55 _data=None):
56 """Construct an instance of the Comment object."""
57 if bool(id) == bool(_data):
58 raise TypeError('Either `id` or `_data` must be provided.')
59 self._mod = self._replies = self._submission = None
60 super(Comment, self).__init__(reddit, _data)
61 if id:
62 self.id = id # pylint: disable=invalid-name
63 else:
64 self._fetched = True
65
66 def __setattr__(self, attribute, value):
67 """Objectify author, replies, and subreddit."""
68 if attribute == 'author':
69 value = Redditor.from_data(self._reddit, value)
70 elif attribute == 'replies':
71 if value == '':
72 value = []
73 else:
74 value = self._reddit._objector.objectify(value).children
75 attribute = '_replies'
76 elif attribute == 'subreddit':
77 value = self._reddit.subreddit(value)
78 super(Comment, self).__setattr__(attribute, value)
79
80 def _extract_submission_id(self):
81 if 'context' in self.__dict__:
82 return self.context.rsplit('/', 4)[1]
83 return self.link_id.split('_', 1)[1]
84
85 def parent(self):
86 """Return the parent of the comment.
87
88 The returned parent will be an instance of either
89 :class:`.Comment`, or :class:`.Submission`.
90
91 If this comment was obtained through a :class:`.Submission`, then its
92 entire ancestry should be immediately available, requiring no extra
93 network requests. However, if this comment was obtained through other
94 means, e.g., ``reddit.comment('COMMENT_ID')``, or
95 ``reddit.inbox.comment_replies``, then the returned parent may be a
96 lazy instance of either :class:`.Comment`, or :class:`.Submission`.
97
98 Lazy Comment Example:
99
100 .. code:: python
101
102 comment = reddit.comment('cklhv0f')
103 parent = comment.parent()
104 # `replies` is empty until the comment is refreshed
105 print(parent.replies) # Output: []
106 parent.refresh()
107 print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]
108
109 .. warning:: Successive calls to :meth:`.parent()` may result in a
110 network request per call when the comment is not obtained through a
111 :class:`.Submission`. See below for an example of how to minimize
112 requests.
113
114 If you have a deeply nested comment and wish to most efficiently
115 discover its top-most :class:`.Comment` ancestor you can chain
116 successive calls to :meth:`.parent()` with calls to :meth:`.refresh()`
117 at every 9 levels. For example:
118
119 .. code:: python
120
121 comment = reddit.comment('dkk4qjd')
122 ancestor = comment
123 refresh_counter = 0
124 while not ancestor.is_root:
125 ancestor = ancestor.parent()
126 if refresh_counter % 9 == 0:
127 ancestor.refresh()
128 refresh_counter += 1
129 print('Top-most Ancestor: {}'.format(ancestor))
130
131 The above code should result in 5 network requests to Reddit. Without
132 the calls to :meth:`.refresh` it would make at least 31 network
133 requests.
134
135 """
136 # pylint: disable=no-member
137 if self.parent_id == self.submission.fullname:
138 return self.submission
139
140 if self.parent_id in self.submission._comments_by_id:
141 # The Comment already exists, so simply return it
142 return self.submission._comments_by_id[self.parent_id]
143 # pylint: enable=no-member
144
145 parent = Comment(self._reddit, self.parent_id.split('_', 1)[1])
146 parent._submission = self.submission
147 return parent
148
149 def permalink(self, fast=False):
150 """Return a permalink to the comment.
151
152 :param fast: Return the result as quickly as possible (default: False).
153
154 In order to determine the full permalink for a comment, the Submission
155 may need to be fetched if it hasn't been already. Set ``fast=True`` if
156 you want to bypass that possible load.
157
158 A full permalink looks like:
159 /r/redditdev/comments/2gmzqe/praw_https_enabled/cklhv0f
160
161 A fast-loaded permalink for the same comment will look like:
162 /comments/2gmzqe//cklhv0f
163
164 """
165 # pylint: disable=no-member
166 if not fast or 'permalink' in self.submission.__dict__:
167 return urljoin(self.submission.permalink, self.id)
168 return '/comments/{}//{}'.format(self.submission.id, self.id)
169
170 def refresh(self):
171 """Refresh the comment's attributes.
172
173 If using :meth:`.Reddit.comment` this method must be called in order to
174 obtain the comment's replies.
175
176 """
177 if 'context' in self.__dict__: # Using hasattr triggers a fetch
178 comment_path = self.context.split('?', 1)[0]
179 else:
180 comment_path = '{}_/{}'.format(
181 self.submission._info_path(), # pylint: disable=no-member
182 self.id)
183
184 # The context limit appears to be 8, but let's ask for more anyway.
185 comment_list = self._reddit.get(comment_path,
186 params={'context': 100})[1].children
187 if not comment_list:
188 raise ClientException(self.MISSING_COMMENT_MESSAGE)
189
190 # With context, the comment may be nested so we have to find it
191 comment = None
192 queue = comment_list[:]
193 while queue and (comment is None or comment.id != self.id):
194 comment = queue.pop()
195 queue.extend(comment._replies)
196
197 if comment.id != self.id:
198 raise ClientException(self.MISSING_COMMENT_MESSAGE)
199
200 if self._submission is not None:
201 del comment.__dict__['_submission'] # Don't replace if set
202 self.__dict__.update(comment.__dict__)
203
204 for reply in comment_list:
205 reply.submission = self.submission
206 return self
207
208
209 class CommentModeration(ThingModerationMixin):
210 """Provide a set of functions pertaining to Comment moderation."""
211
212 def __init__(self, comment):
213 """Create a CommentModeration instance.
214
215 :param comment: The comment to moderate.
216
217 """
218 self.thing = comment
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/praw/models/reddit/comment.py b/praw/models/reddit/comment.py
--- a/praw/models/reddit/comment.py
+++ b/praw/models/reddit/comment.py
@@ -192,7 +192,8 @@
queue = comment_list[:]
while queue and (comment is None or comment.id != self.id):
comment = queue.pop()
- queue.extend(comment._replies)
+ if isinstance(comment, Comment):
+ queue.extend(comment._replies)
if comment.id != self.id:
raise ClientException(self.MISSING_COMMENT_MESSAGE)
| {"golden_diff": "diff --git a/praw/models/reddit/comment.py b/praw/models/reddit/comment.py\n--- a/praw/models/reddit/comment.py\n+++ b/praw/models/reddit/comment.py\n@@ -192,7 +192,8 @@\n queue = comment_list[:]\n while queue and (comment is None or comment.id != self.id):\n comment = queue.pop()\n- queue.extend(comment._replies)\n+ if isinstance(comment, Comment):\n+ queue.extend(comment._replies)\n \n if comment.id != self.id:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n", "issue": "comment.refresh() causes praw to crash\n## Issue Description\r\n\r\nThis issue arises when calling refresh on a comment. I have been able to reproduce the bug multiple times by running this simplified script:\r\n\r\n #!/usr/bin/python3\r\n import praw\r\n\r\n reddit = praw.Reddit('bot1', user_agent='python')\r\n askreddit = reddit.subreddit('askreddit')\r\n\r\n for comment in askreddit.stream.comments():\r\n print('REFRESHING:', comment.id, comment.body)\r\n try:\r\n comment.refresh()\r\n except praw.exceptions.ClientException:\r\n pass\r\n\r\nThis script can run for up to half an hour before it crashes. Sometimes it crashes immediately, and sometimes it will run fine for a while before crashing. I also noticed that it always crashes on the same incriminating comments.\r\n\r\nThis is an example of a comment that that will cause it to crash - dma3mi5. Strangely enough when following the permalink in my browser I cannot see the comment in question, but rather the other comments in the thread. Perhaps this has something to do with the user being shadowbanned?\r\n\r\n c = reddit.comment('dma3mi5')\r\n c.refresh()\r\n\r\nERROR OUTPUT\r\n\r\n Traceback (most recent call last):\r\n File \"./test.py\", line 12, in <module>\r\n comment.refresh()\r\n File \"/usr/local/lib/python3.4/dist-packages/praw/models/reddit/comment.py\", line 196, in refresh\r\n queue.extend(comment._replies)\r\n AttributeError: 'MoreComments' object has no attribute '_replies'\r\n\r\n\r\n\r\n## System Information\r\n\r\nPRAW Version: 5.0.1\r\nPython Version: 3.4\r\nOperating System: Debian\n", "before_files": [{"content": "\"\"\"Provide the Comment class.\"\"\"\nfrom ...const import urljoin\nfrom ...exceptions import ClientException\nfrom ..comment_forest import CommentForest\nfrom .base import RedditBase\nfrom .mixins import InboxableMixin, ThingModerationMixin, UserContentMixin\nfrom .redditor import Redditor\n\n\nclass Comment(RedditBase, InboxableMixin, UserContentMixin):\n \"\"\"A class that represents a reddit comments.\"\"\"\n\n MISSING_COMMENT_MESSAGE = ('This comment does not appear to be in the '\n 'comment tree')\n STR_FIELD = 'id'\n\n @property\n def is_root(self):\n \"\"\"Return True when the comment is a top level comment.\"\"\"\n parent_type = self.parent_id.split('_', 1)[0]\n return parent_type == self._reddit.config.kinds['submission']\n\n @property\n def mod(self):\n \"\"\"Provide an instance of :class:`.CommentModeration`.\"\"\"\n if self._mod is None:\n self._mod = CommentModeration(self)\n return self._mod\n\n @property\n def replies(self):\n \"\"\"Provide an instance of :class:`.CommentForest`.\"\"\"\n if isinstance(self._replies, list):\n self._replies = CommentForest(self.submission, self._replies)\n return self._replies\n\n @property\n def submission(self):\n \"\"\"Return the Submission object this comment belongs to.\"\"\"\n if not self._submission: # Comment not from submission\n self._submission = self._reddit.submission(\n self._extract_submission_id())\n return self._submission\n\n 
@submission.setter\n def submission(self, submission):\n \"\"\"Update the Submission associated with the Comment.\"\"\"\n submission._comments_by_id[self.name] = self\n self._submission = submission\n # pylint: disable=not-an-iterable\n for reply in getattr(self, 'replies', []):\n reply.submission = submission\n\n def __init__(self, reddit, id=None, # pylint: disable=redefined-builtin\n _data=None):\n \"\"\"Construct an instance of the Comment object.\"\"\"\n if bool(id) == bool(_data):\n raise TypeError('Either `id` or `_data` must be provided.')\n self._mod = self._replies = self._submission = None\n super(Comment, self).__init__(reddit, _data)\n if id:\n self.id = id # pylint: disable=invalid-name\n else:\n self._fetched = True\n\n def __setattr__(self, attribute, value):\n \"\"\"Objectify author, replies, and subreddit.\"\"\"\n if attribute == 'author':\n value = Redditor.from_data(self._reddit, value)\n elif attribute == 'replies':\n if value == '':\n value = []\n else:\n value = self._reddit._objector.objectify(value).children\n attribute = '_replies'\n elif attribute == 'subreddit':\n value = self._reddit.subreddit(value)\n super(Comment, self).__setattr__(attribute, value)\n\n def _extract_submission_id(self):\n if 'context' in self.__dict__:\n return self.context.rsplit('/', 4)[1]\n return self.link_id.split('_', 1)[1]\n\n def parent(self):\n \"\"\"Return the parent of the comment.\n\n The returned parent will be an instance of either\n :class:`.Comment`, or :class:`.Submission`.\n\n If this comment was obtained through a :class:`.Submission`, then its\n entire ancestry should be immediately available, requiring no extra\n network requests. However, if this comment was obtained through other\n means, e.g., ``reddit.comment('COMMENT_ID')``, or\n ``reddit.inbox.comment_replies``, then the returned parent may be a\n lazy instance of either :class:`.Comment`, or :class:`.Submission`.\n\n Lazy Comment Example:\n\n .. code:: python\n\n comment = reddit.comment('cklhv0f')\n parent = comment.parent()\n # `replies` is empty until the comment is refreshed\n print(parent.replies) # Output: []\n parent.refresh()\n print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]\n\n .. warning:: Successive calls to :meth:`.parent()` may result in a\n network request per call when the comment is not obtained through a\n :class:`.Submission`. See below for an example of how to minimize\n requests.\n\n If you have a deeply nested comment and wish to most efficiently\n discover its top-most :class:`.Comment` ancestor you can chain\n successive calls to :meth:`.parent()` with calls to :meth:`.refresh()`\n at every 9 levels. For example:\n\n .. code:: python\n\n comment = reddit.comment('dkk4qjd')\n ancestor = comment\n refresh_counter = 0\n while not ancestor.is_root:\n ancestor = ancestor.parent()\n if refresh_counter % 9 == 0:\n ancestor.refresh()\n refresh_counter += 1\n print('Top-most Ancestor: {}'.format(ancestor))\n\n The above code should result in 5 network requests to Reddit. 
Without\n the calls to :meth:`.refresh` it would make at least 31 network\n requests.\n\n \"\"\"\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split('_', 1)[1])\n parent._submission = self.submission\n return parent\n\n def permalink(self, fast=False):\n \"\"\"Return a permalink to the comment.\n\n :param fast: Return the result as quickly as possible (default: False).\n\n In order to determine the full permalink for a comment, the Submission\n may need to be fetched if it hasn't been already. Set ``fast=True`` if\n you want to bypass that possible load.\n\n A full permalink looks like:\n /r/redditdev/comments/2gmzqe/praw_https_enabled/cklhv0f\n\n A fast-loaded permalink for the same comment will look like:\n /comments/2gmzqe//cklhv0f\n\n \"\"\"\n # pylint: disable=no-member\n if not fast or 'permalink' in self.submission.__dict__:\n return urljoin(self.submission.permalink, self.id)\n return '/comments/{}//{}'.format(self.submission.id, self.id)\n\n def refresh(self):\n \"\"\"Refresh the comment's attributes.\n\n If using :meth:`.Reddit.comment` this method must be called in order to\n obtain the comment's replies.\n\n \"\"\"\n if 'context' in self.__dict__: # Using hasattr triggers a fetch\n comment_path = self.context.split('?', 1)[0]\n else:\n comment_path = '{}_/{}'.format(\n self.submission._info_path(), # pylint: disable=no-member\n self.id)\n\n # The context limit appears to be 8, but let's ask for more anyway.\n comment_list = self._reddit.get(comment_path,\n params={'context': 100})[1].children\n if not comment_list:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n # With context, the comment may be nested so we have to find it\n comment = None\n queue = comment_list[:]\n while queue and (comment is None or comment.id != self.id):\n comment = queue.pop()\n queue.extend(comment._replies)\n\n if comment.id != self.id:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n if self._submission is not None:\n del comment.__dict__['_submission'] # Don't replace if set\n self.__dict__.update(comment.__dict__)\n\n for reply in comment_list:\n reply.submission = self.submission\n return self\n\n\nclass CommentModeration(ThingModerationMixin):\n \"\"\"Provide a set of functions pertaining to Comment moderation.\"\"\"\n\n def __init__(self, comment):\n \"\"\"Create a CommentModeration instance.\n\n :param comment: The comment to moderate.\n\n \"\"\"\n self.thing = comment\n", "path": "praw/models/reddit/comment.py"}], "after_files": [{"content": "\"\"\"Provide the Comment class.\"\"\"\nfrom ...const import urljoin\nfrom ...exceptions import ClientException\nfrom ..comment_forest import CommentForest\nfrom .base import RedditBase\nfrom .mixins import InboxableMixin, ThingModerationMixin, UserContentMixin\nfrom .redditor import Redditor\n\n\nclass Comment(RedditBase, InboxableMixin, UserContentMixin):\n \"\"\"A class that represents a reddit comments.\"\"\"\n\n MISSING_COMMENT_MESSAGE = ('This comment does not appear to be in the '\n 'comment tree')\n STR_FIELD = 'id'\n\n @property\n def is_root(self):\n \"\"\"Return True when the comment is a top level comment.\"\"\"\n parent_type = self.parent_id.split('_', 1)[0]\n return parent_type == self._reddit.config.kinds['submission']\n\n 
@property\n def mod(self):\n \"\"\"Provide an instance of :class:`.CommentModeration`.\"\"\"\n if self._mod is None:\n self._mod = CommentModeration(self)\n return self._mod\n\n @property\n def replies(self):\n \"\"\"Provide an instance of :class:`.CommentForest`.\"\"\"\n if isinstance(self._replies, list):\n self._replies = CommentForest(self.submission, self._replies)\n return self._replies\n\n @property\n def submission(self):\n \"\"\"Return the Submission object this comment belongs to.\"\"\"\n if not self._submission: # Comment not from submission\n self._submission = self._reddit.submission(\n self._extract_submission_id())\n return self._submission\n\n @submission.setter\n def submission(self, submission):\n \"\"\"Update the Submission associated with the Comment.\"\"\"\n submission._comments_by_id[self.name] = self\n self._submission = submission\n # pylint: disable=not-an-iterable\n for reply in getattr(self, 'replies', []):\n reply.submission = submission\n\n def __init__(self, reddit, id=None, # pylint: disable=redefined-builtin\n _data=None):\n \"\"\"Construct an instance of the Comment object.\"\"\"\n if bool(id) == bool(_data):\n raise TypeError('Either `id` or `_data` must be provided.')\n self._mod = self._replies = self._submission = None\n super(Comment, self).__init__(reddit, _data)\n if id:\n self.id = id # pylint: disable=invalid-name\n else:\n self._fetched = True\n\n def __setattr__(self, attribute, value):\n \"\"\"Objectify author, replies, and subreddit.\"\"\"\n if attribute == 'author':\n value = Redditor.from_data(self._reddit, value)\n elif attribute == 'replies':\n if value == '':\n value = []\n else:\n value = self._reddit._objector.objectify(value).children\n attribute = '_replies'\n elif attribute == 'subreddit':\n value = self._reddit.subreddit(value)\n super(Comment, self).__setattr__(attribute, value)\n\n def _extract_submission_id(self):\n if 'context' in self.__dict__:\n return self.context.rsplit('/', 4)[1]\n return self.link_id.split('_', 1)[1]\n\n def parent(self):\n \"\"\"Return the parent of the comment.\n\n The returned parent will be an instance of either\n :class:`.Comment`, or :class:`.Submission`.\n\n If this comment was obtained through a :class:`.Submission`, then its\n entire ancestry should be immediately available, requiring no extra\n network requests. However, if this comment was obtained through other\n means, e.g., ``reddit.comment('COMMENT_ID')``, or\n ``reddit.inbox.comment_replies``, then the returned parent may be a\n lazy instance of either :class:`.Comment`, or :class:`.Submission`.\n\n Lazy Comment Example:\n\n .. code:: python\n\n comment = reddit.comment('cklhv0f')\n parent = comment.parent()\n # `replies` is empty until the comment is refreshed\n print(parent.replies) # Output: []\n parent.refresh()\n print(parent.replies) # Output is at least: [Comment(id='cklhv0f')]\n\n .. warning:: Successive calls to :meth:`.parent()` may result in a\n network request per call when the comment is not obtained through a\n :class:`.Submission`. See below for an example of how to minimize\n requests.\n\n If you have a deeply nested comment and wish to most efficiently\n discover its top-most :class:`.Comment` ancestor you can chain\n successive calls to :meth:`.parent()` with calls to :meth:`.refresh()`\n at every 9 levels. For example:\n\n .. 
code:: python\n\n comment = reddit.comment('dkk4qjd')\n ancestor = comment\n refresh_counter = 0\n while not ancestor.is_root:\n ancestor = ancestor.parent()\n if refresh_counter % 9 == 0:\n ancestor.refresh()\n refresh_counter += 1\n print('Top-most Ancestor: {}'.format(ancestor))\n\n The above code should result in 5 network requests to Reddit. Without\n the calls to :meth:`.refresh` it would make at least 31 network\n requests.\n\n \"\"\"\n # pylint: disable=no-member\n if self.parent_id == self.submission.fullname:\n return self.submission\n\n if self.parent_id in self.submission._comments_by_id:\n # The Comment already exists, so simply return it\n return self.submission._comments_by_id[self.parent_id]\n # pylint: enable=no-member\n\n parent = Comment(self._reddit, self.parent_id.split('_', 1)[1])\n parent._submission = self.submission\n return parent\n\n def permalink(self, fast=False):\n \"\"\"Return a permalink to the comment.\n\n :param fast: Return the result as quickly as possible (default: False).\n\n In order to determine the full permalink for a comment, the Submission\n may need to be fetched if it hasn't been already. Set ``fast=True`` if\n you want to bypass that possible load.\n\n A full permalink looks like:\n /r/redditdev/comments/2gmzqe/praw_https_enabled/cklhv0f\n\n A fast-loaded permalink for the same comment will look like:\n /comments/2gmzqe//cklhv0f\n\n \"\"\"\n # pylint: disable=no-member\n if not fast or 'permalink' in self.submission.__dict__:\n return urljoin(self.submission.permalink, self.id)\n return '/comments/{}//{}'.format(self.submission.id, self.id)\n\n def refresh(self):\n \"\"\"Refresh the comment's attributes.\n\n If using :meth:`.Reddit.comment` this method must be called in order to\n obtain the comment's replies.\n\n \"\"\"\n if 'context' in self.__dict__: # Using hasattr triggers a fetch\n comment_path = self.context.split('?', 1)[0]\n else:\n comment_path = '{}_/{}'.format(\n self.submission._info_path(), # pylint: disable=no-member\n self.id)\n\n # The context limit appears to be 8, but let's ask for more anyway.\n comment_list = self._reddit.get(comment_path,\n params={'context': 100})[1].children\n if not comment_list:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n # With context, the comment may be nested so we have to find it\n comment = None\n queue = comment_list[:]\n while queue and (comment is None or comment.id != self.id):\n comment = queue.pop()\n if isinstance(comment, Comment):\n queue.extend(comment._replies)\n\n if comment.id != self.id:\n raise ClientException(self.MISSING_COMMENT_MESSAGE)\n\n if self._submission is not None:\n del comment.__dict__['_submission'] # Don't replace if set\n self.__dict__.update(comment.__dict__)\n\n for reply in comment_list:\n reply.submission = self.submission\n return self\n\n\nclass CommentModeration(ThingModerationMixin):\n \"\"\"Provide a set of functions pertaining to Comment moderation.\"\"\"\n\n def __init__(self, comment):\n \"\"\"Create a CommentModeration instance.\n\n :param comment: The comment to moderate.\n\n \"\"\"\n self.thing = comment\n", "path": "praw/models/reddit/comment.py"}]} | 2,987 | 127 |
gh_patches_debug_7528 | rasdani/github-patches | git_diff | e-valuation__EvaP-1593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set reply-to header
For all emails sent via one of the contact modals, the reply-to header should be set to the address of the person sending the request.
--- END ISSUE ---
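Editor's note: the snippet below is an illustrative sketch, not code from EvaP; it only shows that `django.core.mail.EmailMessage` accepts a `reply_to` list, which is the mechanism a fix for this issue would rely on. All addresses are placeholders.
```python
# Minimal sketch: carrying the sender's address in the Reply-To header.
from django.core.mail import EmailMessage

mail = EmailMessage(
    subject="[EvaP] Message from jane@example.com",  # placeholder sender
    body="Title\njane@example.com\n\nMessage text",
    to=["contact@example.com"],                      # placeholder for the contact inbox
    reply_to=["jane@example.com"],                   # address of the requesting user
)
mail.send()
```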
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/views.py`
Content:
```
1 import logging
2 from datetime import date, timedelta
3
4 from django.conf import settings
5 from django.contrib import messages, auth
6 from django.contrib.auth.decorators import login_required
7 from django.core.mail import EmailMessage
8 from django.http import HttpResponse, HttpResponseBadRequest
9 from django.shortcuts import redirect, render
10 from django.utils.translation import gettext as _
11 from django.views.decorators.http import require_POST
12 from django.views.decorators.debug import sensitive_post_parameters
13 from django.views.i18n import set_language
14
15 from evap.evaluation.forms import NewKeyForm, LoginEmailForm
16 from evap.evaluation.models import FaqSection, EmailTemplate, Semester
17 from evap.middleware import no_login_required
18 from evap.staff.tools import delete_navbar_cache_for_users
19
20
21 logger = logging.getLogger(__name__)
22
23
24 def redirect_user_to_start_page(user):
25 # pylint: disable=too-many-return-statements
26 active_semester = Semester.active_semester()
27
28 if user.is_reviewer:
29 if active_semester is not None:
30 return redirect('staff:semester_view', active_semester.id)
31 return redirect('staff:index')
32
33 if user.is_grade_publisher:
34 if active_semester is not None:
35 return redirect('grades:semester_view', active_semester.id)
36 return redirect('grades:index')
37
38 if user.is_student:
39 return redirect('student:index')
40 if user.is_responsible_or_contributor_or_delegate:
41 return redirect('contributor:index')
42
43 return redirect('results:index')
44
45
46 @no_login_required
47 @sensitive_post_parameters("password")
48 def index(request):
49 """Main entry page into EvaP providing all the login options available. The OpenID login is thought to be used for
50 internal users. The login key mechanism is meant to be used to include external participants, e.g. visiting
51 students or visiting contributors. A login with email and password is available if OpenID is deactivated.
52 """
53
54 # parse the form data into the respective form
55 submit_type = request.POST.get("submit_type", "no_submit")
56 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
57 login_email_form = LoginEmailForm(request, request.POST if submit_type == "login_email" else None)
58
59 # process form data
60 if request.method == 'POST':
61 if new_key_form.is_valid():
62 # user wants a new login key
63 profile = new_key_form.get_user()
64 profile.ensure_valid_login_key()
65 profile.save()
66
67 EmailTemplate.send_login_url_to_user(new_key_form.get_user())
68
69 messages.success(request, _("We sent you an email with a one-time login URL. Please check your inbox."))
70 return redirect('evaluation:index')
71
72 if login_email_form.is_valid():
73 # user would like to login with email and password and passed password test
74 auth.login(request, login_email_form.get_user())
75
76 # clean up our test cookie
77 if request.session.test_cookie_worked():
78 request.session.delete_test_cookie()
79 return redirect('evaluation:index')
80
81 # if not logged in by now, render form
82 if not request.user.is_authenticated:
83 # set test cookie to verify whether they work in the next step
84 request.session.set_test_cookie()
85
86 template_data = dict(
87 new_key_form=new_key_form,
88 login_email_form=login_email_form,
89 openid_active=settings.ACTIVATE_OPEN_ID_LOGIN,
90 )
91 return render(request, "index.html", template_data)
92
93 # the cached navbar might contain CSRF tokens that are invalid after a new login
94 delete_navbar_cache_for_users([request.user])
95
96 # check for redirect variable
97 redirect_to = request.GET.get("next", None)
98 if redirect_to is not None:
99 return redirect(redirect_to)
100
101 return redirect_user_to_start_page(request.user)
102
103
104 @no_login_required
105 def login_key_authentication(request, key):
106 user = auth.authenticate(request, key=key)
107
108 if user and not user.is_active:
109 messages.error(request, _("Inactive users are not allowed to login."))
110 return redirect('evaluation:index')
111
112 # If we already have an authenticated user don't try to login a new user. Show an error message if another user
113 # tries to login with a URL in this situation.
114 if request.user.is_authenticated:
115 if user != request.user:
116 messages.error(request, _("Another user is currently logged in. Please logout first and then use the login URL again."))
117 return redirect('evaluation:index')
118
119 if user and user.login_key_valid_until >= date.today():
120 if request.method != "POST":
121 template_data = {
122 'username': user.full_name
123 }
124 return render(request, "external_user_confirm_login.html", template_data)
125
126 # User is valid. Set request.user and persist user in the session by logging the user in.
127 request.user = user
128 auth.login(request, user)
129 messages.success(request, _("Logged in as %s.") % user.full_name)
130 # Invalidate the login key, but keep it stored so we can later identify the user that is trying to login and send a new link
131 user.login_key_valid_until = date.today() - timedelta(1)
132 user.save()
133 elif user:
134 # A user exists, but the login key is not valid anymore. Send the user a new one.
135 user.ensure_valid_login_key()
136 EmailTemplate.send_login_url_to_user(user)
137 messages.warning(request, _("The login URL is not valid anymore. We sent you a new one to your email address."))
138 else:
139 messages.warning(request, _("Invalid login URL. Please request a new one below."))
140
141 return redirect('evaluation:index')
142
143
144 @no_login_required
145 def faq(request):
146 return render(request, "faq.html", dict(sections=FaqSection.objects.all()))
147
148
149 @no_login_required
150 def legal_notice(request):
151 return render(request, "legal_notice.html", dict())
152
153
154 @require_POST
155 @login_required
156 def contact(request):
157 message = request.POST.get("message")
158 title = request.POST.get("title")
159 email = request.user.email or f"User {request.user.id}"
160 subject = f"[EvaP] Message from {email}"
161
162 if message:
163 mail = EmailMessage(
164 subject=subject,
165 body="{}\n{}\n\n{}".format(title, request.user.email, message),
166 to=[settings.CONTACT_EMAIL])
167 try:
168 mail.send()
169 logger.info('Sent contact email: \n{}\n'.format(mail.message()))
170 return HttpResponse()
171 except Exception:
172 logger.exception('An exception occurred when sending the following contact email:\n{}\n'.format(mail.message()))
173 raise
174
175 return HttpResponseBadRequest()
176
177
178 @no_login_required
179 @require_POST
180 def set_lang(request):
181 if request.user.is_authenticated:
182 user = request.user
183 user.language = request.POST['language']
184 user.save()
185
186 return set_language(request)
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -163,7 +163,8 @@
mail = EmailMessage(
subject=subject,
body="{}\n{}\n\n{}".format(title, request.user.email, message),
- to=[settings.CONTACT_EMAIL])
+ to=[settings.CONTACT_EMAIL],
+ reply_to=[request.user.email])
try:
mail.send()
logger.info('Sent contact email: \n{}\n'.format(mail.message()))
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -163,7 +163,8 @@\n mail = EmailMessage(\n subject=subject,\n body=\"{}\\n{}\\n\\n{}\".format(title, request.user.email, message),\n- to=[settings.CONTACT_EMAIL])\n+ to=[settings.CONTACT_EMAIL],\n+ reply_to=[request.user.email])\n try:\n mail.send()\n logger.info('Sent contact email: \\n{}\\n'.format(mail.message()))\n", "issue": "Set reply-to header\nFor all emails sent via one of the contact modals, the reply-to header should be set to the address of the person sending the request.\n", "before_files": [{"content": "import logging\nfrom datetime import date, timedelta\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.i18n import set_language\n\nfrom evap.evaluation.forms import NewKeyForm, LoginEmailForm\nfrom evap.evaluation.models import FaqSection, EmailTemplate, Semester\nfrom evap.middleware import no_login_required\nfrom evap.staff.tools import delete_navbar_cache_for_users\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef redirect_user_to_start_page(user):\n # pylint: disable=too-many-return-statements\n active_semester = Semester.active_semester()\n\n if user.is_reviewer:\n if active_semester is not None:\n return redirect('staff:semester_view', active_semester.id)\n return redirect('staff:index')\n\n if user.is_grade_publisher:\n if active_semester is not None:\n return redirect('grades:semester_view', active_semester.id)\n return redirect('grades:index')\n\n if user.is_student:\n return redirect('student:index')\n if user.is_responsible_or_contributor_or_delegate:\n return redirect('contributor:index')\n\n return redirect('results:index')\n\n\n@no_login_required\n@sensitive_post_parameters(\"password\")\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The OpenID login is thought to be used for\n internal users. The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors. A login with email and password is available if OpenID is deactivated.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_email_form = LoginEmailForm(request, request.POST if submit_type == \"login_email\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.ensure_valid_login_key()\n profile.save()\n\n EmailTemplate.send_login_url_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"We sent you an email with a one-time login URL. 
Please check your inbox.\"))\n return redirect('evaluation:index')\n\n if login_email_form.is_valid():\n # user would like to login with email and password and passed password test\n auth.login(request, login_email_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n return redirect('evaluation:index')\n\n # if not logged in by now, render form\n if not request.user.is_authenticated:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(\n new_key_form=new_key_form,\n login_email_form=login_email_form,\n openid_active=settings.ACTIVATE_OPEN_ID_LOGIN,\n )\n return render(request, \"index.html\", template_data)\n\n # the cached navbar might contain CSRF tokens that are invalid after a new login\n delete_navbar_cache_for_users([request.user])\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n return redirect(redirect_to)\n\n return redirect_user_to_start_page(request.user)\n\n\n@no_login_required\ndef login_key_authentication(request, key):\n user = auth.authenticate(request, key=key)\n\n if user and not user.is_active:\n messages.error(request, _(\"Inactive users are not allowed to login.\"))\n return redirect('evaluation:index')\n\n # If we already have an authenticated user don't try to login a new user. Show an error message if another user\n # tries to login with a URL in this situation.\n if request.user.is_authenticated:\n if user != request.user:\n messages.error(request, _(\"Another user is currently logged in. Please logout first and then use the login URL again.\"))\n return redirect('evaluation:index')\n\n if user and user.login_key_valid_until >= date.today():\n if request.method != \"POST\":\n template_data = {\n 'username': user.full_name\n }\n return render(request, \"external_user_confirm_login.html\", template_data)\n\n # User is valid. Set request.user and persist user in the session by logging the user in.\n request.user = user\n auth.login(request, user)\n messages.success(request, _(\"Logged in as %s.\") % user.full_name)\n # Invalidate the login key, but keep it stored so we can later identify the user that is trying to login and send a new link\n user.login_key_valid_until = date.today() - timedelta(1)\n user.save()\n elif user:\n # A user exists, but the login key is not valid anymore. Send the user a new one.\n user.ensure_valid_login_key()\n EmailTemplate.send_login_url_to_user(user)\n messages.warning(request, _(\"The login URL is not valid anymore. We sent you a new one to your email address.\"))\n else:\n messages.warning(request, _(\"Invalid login URL. 
Please request a new one below.\"))\n\n return redirect('evaluation:index')\n\n\n@no_login_required\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\n\n@no_login_required\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n\n\n@require_POST\n@login_required\ndef contact(request):\n message = request.POST.get(\"message\")\n title = request.POST.get(\"title\")\n email = request.user.email or f\"User {request.user.id}\"\n subject = f\"[EvaP] Message from {email}\"\n\n if message:\n mail = EmailMessage(\n subject=subject,\n body=\"{}\\n{}\\n\\n{}\".format(title, request.user.email, message),\n to=[settings.CONTACT_EMAIL])\n try:\n mail.send()\n logger.info('Sent contact email: \\n{}\\n'.format(mail.message()))\n return HttpResponse()\n except Exception:\n logger.exception('An exception occurred when sending the following contact email:\\n{}\\n'.format(mail.message()))\n raise\n\n return HttpResponseBadRequest()\n\n\n@no_login_required\n@require_POST\ndef set_lang(request):\n if request.user.is_authenticated:\n user = request.user\n user.language = request.POST['language']\n user.save()\n\n return set_language(request)\n", "path": "evap/evaluation/views.py"}], "after_files": [{"content": "import logging\nfrom datetime import date, timedelta\n\nfrom django.conf import settings\nfrom django.contrib import messages, auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.i18n import set_language\n\nfrom evap.evaluation.forms import NewKeyForm, LoginEmailForm\nfrom evap.evaluation.models import FaqSection, EmailTemplate, Semester\nfrom evap.middleware import no_login_required\nfrom evap.staff.tools import delete_navbar_cache_for_users\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef redirect_user_to_start_page(user):\n # pylint: disable=too-many-return-statements\n active_semester = Semester.active_semester()\n\n if user.is_reviewer:\n if active_semester is not None:\n return redirect('staff:semester_view', active_semester.id)\n return redirect('staff:index')\n\n if user.is_grade_publisher:\n if active_semester is not None:\n return redirect('grades:semester_view', active_semester.id)\n return redirect('grades:index')\n\n if user.is_student:\n return redirect('student:index')\n if user.is_responsible_or_contributor_or_delegate:\n return redirect('contributor:index')\n\n return redirect('results:index')\n\n\n@no_login_required\n@sensitive_post_parameters(\"password\")\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The OpenID login is thought to be used for\n internal users. The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors. 
A login with email and password is available if OpenID is deactivated.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_email_form = LoginEmailForm(request, request.POST if submit_type == \"login_email\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.ensure_valid_login_key()\n profile.save()\n\n EmailTemplate.send_login_url_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"We sent you an email with a one-time login URL. Please check your inbox.\"))\n return redirect('evaluation:index')\n\n if login_email_form.is_valid():\n # user would like to login with email and password and passed password test\n auth.login(request, login_email_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n return redirect('evaluation:index')\n\n # if not logged in by now, render form\n if not request.user.is_authenticated:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(\n new_key_form=new_key_form,\n login_email_form=login_email_form,\n openid_active=settings.ACTIVATE_OPEN_ID_LOGIN,\n )\n return render(request, \"index.html\", template_data)\n\n # the cached navbar might contain CSRF tokens that are invalid after a new login\n delete_navbar_cache_for_users([request.user])\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n return redirect(redirect_to)\n\n return redirect_user_to_start_page(request.user)\n\n\n@no_login_required\ndef login_key_authentication(request, key):\n user = auth.authenticate(request, key=key)\n\n if user and not user.is_active:\n messages.error(request, _(\"Inactive users are not allowed to login.\"))\n return redirect('evaluation:index')\n\n # If we already have an authenticated user don't try to login a new user. Show an error message if another user\n # tries to login with a URL in this situation.\n if request.user.is_authenticated:\n if user != request.user:\n messages.error(request, _(\"Another user is currently logged in. Please logout first and then use the login URL again.\"))\n return redirect('evaluation:index')\n\n if user and user.login_key_valid_until >= date.today():\n if request.method != \"POST\":\n template_data = {\n 'username': user.full_name\n }\n return render(request, \"external_user_confirm_login.html\", template_data)\n\n # User is valid. Set request.user and persist user in the session by logging the user in.\n request.user = user\n auth.login(request, user)\n messages.success(request, _(\"Logged in as %s.\") % user.full_name)\n # Invalidate the login key, but keep it stored so we can later identify the user that is trying to login and send a new link\n user.login_key_valid_until = date.today() - timedelta(1)\n user.save()\n elif user:\n # A user exists, but the login key is not valid anymore. Send the user a new one.\n user.ensure_valid_login_key()\n EmailTemplate.send_login_url_to_user(user)\n messages.warning(request, _(\"The login URL is not valid anymore. We sent you a new one to your email address.\"))\n else:\n messages.warning(request, _(\"Invalid login URL. 
Please request a new one below.\"))\n\n return redirect('evaluation:index')\n\n\n@no_login_required\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\n\n@no_login_required\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n\n\n@require_POST\n@login_required\ndef contact(request):\n message = request.POST.get(\"message\")\n title = request.POST.get(\"title\")\n email = request.user.email or f\"User {request.user.id}\"\n subject = f\"[EvaP] Message from {email}\"\n\n if message:\n mail = EmailMessage(\n subject=subject,\n body=\"{}\\n{}\\n\\n{}\".format(title, request.user.email, message),\n to=[settings.CONTACT_EMAIL],\n reply_to=[request.user.email])\n try:\n mail.send()\n logger.info('Sent contact email: \\n{}\\n'.format(mail.message()))\n return HttpResponse()\n except Exception:\n logger.exception('An exception occurred when sending the following contact email:\\n{}\\n'.format(mail.message()))\n raise\n\n return HttpResponseBadRequest()\n\n\n@no_login_required\n@require_POST\ndef set_lang(request):\n if request.user.is_authenticated:\n user = request.user\n user.language = request.POST['language']\n user.save()\n\n return set_language(request)\n", "path": "evap/evaluation/views.py"}]} | 2,212 | 134 |
gh_patches_debug_764 | rasdani/github-patches | git_diff | rasterio__rasterio-1692 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
more explicit NotImplementedError messages in read mode?
In wanting to set a GeoTIFF's CRS, I encountered [this](https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx#L516) NotImplementedError when trying to run the following code:
```
with rasterio.open(filepath) as src:
src.crs = "EPSG:3857"
```
Though in retrospect it is obvious the above will fail without explicitly specifying the proper mode, i.e. `'r+'` in this case, I was momentarily thrown off by the error and assumed something was wrong with my approach. Would a more explicit error message be useful here?
--- END ISSUE ---
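
A minimal sketch of the correct usage implied by the issue text — the dataset has to be opened in update mode before `crs` can be assigned. `filepath` and the string CRS form are taken from the issue snippet and assumed to be accepted by rasterio; if the string form is rejected, wrapping it in `rasterio.crs.CRS.from_string(...)` is the usual fallback:

```python
import rasterio

# Open in update mode ("r+") so georeferencing attributes can be written;
# in plain "r" mode the crs setter raises NotImplementedError.
with rasterio.open(filepath, "r+") as dst:
    dst.crs = "EPSG:3857"
```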
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/errors.py`
Content:
```
1 """Errors and Warnings."""
2
3 from click import FileError
4
5
6 class RasterioError(Exception):
7 """Root exception class"""
8
9
10 class WindowError(RasterioError):
11 """Raised when errors occur during window operations"""
12
13
14 class CRSError(ValueError):
15 """Raised when a CRS string or mapping is invalid or cannot serve
16 to define a coordinate transformation."""
17
18
19 class EnvError(RasterioError):
20 """Raised when the state of GDAL/AWS environment cannot be created
21 or modified."""
22
23
24 class DriverRegistrationError(ValueError):
25 """Raised when a format driver is requested but is not registered."""
26
27
28 class FileOverwriteError(FileError):
29 """Raised when Rasterio's CLI refuses to clobber output files."""
30
31 def __init__(self, message):
32 """Raise FileOverwriteError with message as hint."""
33 super(FileOverwriteError, self).__init__('', hint=message)
34
35
36 class RasterioIOError(IOError):
37 """Raised when a dataset cannot be opened using one of the
38 registered format drivers."""
39
40
41 class NodataShadowWarning(UserWarning):
42 """Warn that a dataset's nodata attribute is shadowing its alpha band."""
43
44 def __str__(self):
45 return ("The dataset's nodata attribute is shadowing "
46 "the alpha band. All masks will be determined "
47 "by the nodata attribute")
48
49
50 class NotGeoreferencedWarning(UserWarning):
51 """Warn that a dataset isn't georeferenced."""
52
53
54 class GDALBehaviorChangeException(RuntimeError):
55 """Raised when GDAL's behavior differs from the given arguments. For
56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users
57 expecting it to be off will be presented with a MultiPolygon when the
58 rest of their code expects a Polygon.
59
60 # Raises an exception on GDAL >= 2.2.0
61 rasterio.warp.transform_geometry(
62 src_crs, dst_crs, antimeridian_cutting=False)
63 """
64
65
66 class GDALOptionNotImplementedError(RasterioError):
67 """A dataset opening or dataset creation option can't be supported
68
69 This will be raised from Rasterio's shim modules. For example, when
70 a user passes arguments to open_dataset() that can't be evaluated
71 by GDAL 1.x.
72 """
73
74 class GDALVersionError(RasterioError):
75 """Raised if the runtime version of GDAL does not meet the required
76 version of GDAL."""
77
78
79 class WindowEvaluationError(ValueError):
80 """Raised when window evaluation fails"""
81
82
83 class RasterioDeprecationWarning(UserWarning):
84 """Rasterio module deprecations"""
85
86
87 class RasterBlockError(RasterioError):
88 """Raised when raster block access fails"""
89
90
91 class BandOverviewError(UserWarning):
92 """Raised when a band overview access fails."""
93
94
95 class WarpOptionsError(RasterioError):
96 """Raised when options for a warp operation are invalid"""
97
98
99 class UnsupportedOperation(RasterioError):
100 """Raised when reading from a file opened in 'w' mode"""
101
102
103 class OverviewCreationError(RasterioError):
104 """Raised when creation of an overview fails"""
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/errors.py b/rasterio/errors.py
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -102,3 +102,7 @@
class OverviewCreationError(RasterioError):
"""Raised when creation of an overview fails"""
+
+
+class DatasetAttributeError(RasterioError, NotImplementedError):
+ """Raised when dataset attributes are misused"""
| {"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -102,3 +102,7 @@\n \n class OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n+\n+\n+class DatasetAttributeError(RasterioError, NotImplementedError):\n+ \"\"\"Raised when dataset attributes are misused\"\"\"\n", "issue": "more explicit NotImplementedError messages in read mode ?\nIn wanting to set a GeoTIFF's CRS, I encountered [this](https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx#L516) NotImplementedError when trying to run the following code:\r\n```\r\nwith rasterio.open(filepath) as src:\r\n src.crs = \"EPSG:3857\"\r\n```\r\nThough in retrospect it is obvious the above will fail without explicitly specifying the proper mode , i.e. `'r+'` in this case, I was momentarily thrown off by the error and assumed something was wrong with my approach. Would a more explicit error message be useful here?\r\n\n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. 
For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n", "path": "rasterio/errors.py"}], "after_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. 
For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n\n\nclass DatasetAttributeError(RasterioError, NotImplementedError):\n \"\"\"Raised when dataset attributes are misused\"\"\"\n", "path": "rasterio/errors.py"}]} | 1,296 | 91 |
gh_patches_debug_16176 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2358 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forecasting I tutorial fails on Windows, no bunzip2
Hi,
I have just downloaded PyTorch 1.4 and Pyro 1.3 into a new Python 3.7 environment (Anaconda). The Forecasting I tutorial fails on the line
dataset = load_bart_od()
with message:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "D:\progs2\anaconda3\envs\pyro13\lib\site-packages\pyro\contrib\examples\bart.py", line 125, in load_bart_od
subprocess.check_call(["bunzip2", "-k", filename])
File "D:\progs2\anaconda3\envs\pyro13\lib\subprocess.py", line 358, in check_call
retcode = call(*popenargs, **kwargs)
File "D:\progs2\anaconda3\envs\pyro13\lib\subprocess.py", line 339, in call
with Popen(*popenargs, **kwargs) as p:
File "D:\progs2\anaconda3\envs\pyro13\lib\subprocess.py", line 800, in __init__
restore_signals, start_new_session)
File "D:\progs2\anaconda3\envs\pyro13\lib\subprocess.py", line 1207, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
Regards,
Slawek
--- END ISSUE ---
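
A minimal sketch of a portable replacement for the external `bunzip2` call, mirroring the standard-library `bz2` approach used in the accepted patch shown further below (the filename here is illustrative; the real code builds it with `os.path.join(DATA, ...)`):

```python
import bz2

filename = "bart_full.pkl.bz2"  # illustrative path

# Decompress with the bz2 module instead of shelling out to "bunzip2",
# which is not available on a stock Windows install.
with bz2.BZ2File(filename) as src, open(filename[:-4], "wb") as dst:
    dst.write(src.read())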
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/contrib/examples/bart.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import argparse
5 import csv
6 import datetime
7 import logging
8 import multiprocessing
9 import os
10 import subprocess
11 import sys
12 import urllib
13
14 import torch
15
16 from pyro.contrib.examples.util import get_data_directory
17
18 DATA = get_data_directory(__file__)
19
20 # https://www.bart.gov/about/reports/ridership
21 SOURCE_DIR = "http://64.111.127.166/origin-destination/"
22 SOURCE_FILES = [
23 "date-hour-soo-dest-2011.csv.gz",
24 "date-hour-soo-dest-2012.csv.gz",
25 "date-hour-soo-dest-2013.csv.gz",
26 "date-hour-soo-dest-2014.csv.gz",
27 "date-hour-soo-dest-2015.csv.gz",
28 "date-hour-soo-dest-2016.csv.gz",
29 "date-hour-soo-dest-2017.csv.gz",
30 "date-hour-soo-dest-2018.csv.gz",
31 "date-hour-soo-dest-2019.csv.gz",
32 ]
33 CACHE_URL = "https://d2hg8soec8ck9v.cloudfront.net/datasets/bart_full.pkl.bz2"
34
35
36 def _mkdir_p(dirname):
37 if not os.path.exists(dirname):
38 try:
39 os.makedirs(dirname)
40 except FileExistsError:
41 pass
42
43
44 def _load_hourly_od(basename):
45 filename = os.path.join(DATA, basename.replace(".csv.gz", ".pkl"))
46 if os.path.exists(filename):
47 return filename
48
49 # Download source files.
50 gz_filename = os.path.join(DATA, basename)
51 if not os.path.exists(gz_filename):
52 url = SOURCE_DIR + basename
53 logging.debug("downloading {}".format(url))
54 urllib.request.urlretrieve(url, gz_filename)
55 csv_filename = gz_filename[:-3]
56 assert csv_filename.endswith(".csv")
57 if not os.path.exists(csv_filename):
58 logging.debug("unzipping {}".format(gz_filename))
59 subprocess.check_call(["gunzip", "-k", gz_filename])
60 assert os.path.exists(csv_filename)
61
62 # Convert to PyTorch.
63 logging.debug("converting {}".format(csv_filename))
64 start_date = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
65 stations = {}
66 num_rows = sum(1 for _ in open(csv_filename))
67 logging.info("Formatting {} rows".format(num_rows))
68 rows = torch.empty((num_rows, 4), dtype=torch.long)
69 with open(csv_filename) as f:
70 for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):
71 date = datetime.datetime.strptime(date, "%Y-%m-%d")
72 date += datetime.timedelta(hours=int(hour))
73 rows[i, 0] = int((date - start_date).total_seconds() / 3600)
74 rows[i, 1] = stations.setdefault(origin, len(stations))
75 rows[i, 2] = stations.setdefault(destin, len(stations))
76 rows[i, 3] = int(trip_count)
77 if i % 10000 == 0:
78 sys.stderr.write(".")
79 sys.stderr.flush()
80
81 # Save data with metadata.
82 dataset = {
83 "basename": basename,
84 "start_date": start_date,
85 "stations": stations,
86 "rows": rows,
87 "schema": ["time_hours", "origin", "destin", "trip_count"],
88 }
89 dataset["rows"]
90 logging.debug("saving {}".format(filename))
91 torch.save(dataset, filename)
92 return filename
93
94
95 def load_bart_od():
96 """
97 Load a dataset of hourly origin-destination ridership counts for every pair
98 of BART stations during the years 2011-2019.
99
100 **Source** https://www.bart.gov/about/reports/ridership
101
102 This downloads the dataset the first time it is called. On subsequent calls
103 this reads from a local cached file ``.pkl.bz2``. This attempts to
104 download a preprocessed compressed cached file maintained by the Pyro team.
105 On cache hit this should be very fast. On cache miss this falls back to
106 downloading the original data source and preprocessing the dataset,
107 requiring about 350MB of file transfer, storing a few GB of temp files, and
108 taking upwards of 30 minutes.
109
110 :returns: a dataset is a dictionary with fields:
111
112 - "stations": a list of strings of station names
113 - "start_date": a :py:class:`datetime.datetime` for the first observaion
114 - "counts": a ``torch.FloatTensor`` of ridership counts, with shape
115 ``(num_hours, len(stations), len(stations))``.
116 """
117 _mkdir_p(DATA)
118 filename = os.path.join(DATA, "bart_full.pkl.bz2")
119 # Work around apparent bug in torch.load(),torch.save().
120 pkl_file = filename.rsplit(".", 1)[0]
121 if not os.path.exists(pkl_file):
122 try:
123 urllib.request.urlretrieve(CACHE_URL, filename)
124 logging.debug("cache hit, uncompressing")
125 subprocess.check_call(["bunzip2", "-k", filename])
126 except urllib.error.HTTPError:
127 logging.debug("cache miss, preprocessing from scratch")
128 if os.path.exists(pkl_file):
129 return torch.load(pkl_file)
130
131 filenames = multiprocessing.Pool(len(SOURCE_FILES)).map(_load_hourly_od, SOURCE_FILES)
132 datasets = list(map(torch.load, filenames))
133
134 stations = sorted(set().union(*(d["stations"].keys() for d in datasets)))
135 min_time = min(int(d["rows"][:, 0].min()) for d in datasets)
136 max_time = max(int(d["rows"][:, 0].max()) for d in datasets)
137 num_rows = max_time - min_time + 1
138 start_date = datasets[0]["start_date"] + datetime.timedelta(hours=min_time),
139 logging.info("Loaded data from {} stations, {} hours"
140 .format(len(stations), num_rows))
141
142 result = torch.zeros(num_rows, len(stations), len(stations))
143 for dataset in datasets:
144 part_stations = sorted(dataset["stations"], key=dataset["stations"].__getitem__)
145 part_to_whole = torch.tensor(list(map(stations.index, part_stations)))
146 time = dataset["rows"][:, 0] - min_time
147 origin = part_to_whole[dataset["rows"][:, 1]]
148 destin = part_to_whole[dataset["rows"][:, 2]]
149 count = dataset["rows"][:, 3].float()
150 result[time, origin, destin] = count
151 dataset.clear()
152 logging.info("Loaded {} shaped data of mean {:0.3g}"
153 .format(result.shape, result.mean()))
154
155 dataset = {
156 "stations": stations,
157 "start_date": start_date,
158 "counts": result,
159 }
160 torch.save(dataset, pkl_file)
161 subprocess.check_call(["bzip2", "-k", pkl_file])
162 assert os.path.exists(filename)
163 return dataset
164
165
166 def load_fake_od():
167 """
168 Create a tiny synthetic dataset for smoke testing.
169 """
170 dataset = {
171 "stations": ["12TH", "EMBR", "SFIA"],
172 "start_date": datetime.datetime.strptime("2000-01-01", "%Y-%m-%d"),
173 "counts": torch.distributions.Poisson(100).sample([24 * 7 * 8, 3, 3]),
174 }
175 return dataset
176
177
178 if __name__ == "__main__":
179 parser = argparse.ArgumentParser(description="BART data preprocessor")
180 parser.add_argument("-v", "--verbose", action="store_true")
181 args = parser.parse_args()
182
183 logging.basicConfig(format='%(relativeCreated) 9d %(message)s',
184 level=logging.DEBUG if args.verbose else logging.INFO)
185 load_bart_od()
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/contrib/examples/bart.py b/pyro/contrib/examples/bart.py
--- a/pyro/contrib/examples/bart.py
+++ b/pyro/contrib/examples/bart.py
@@ -2,6 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
import argparse
+import bz2
import csv
import datetime
import logging
@@ -122,7 +123,8 @@
try:
urllib.request.urlretrieve(CACHE_URL, filename)
logging.debug("cache hit, uncompressing")
- subprocess.check_call(["bunzip2", "-k", filename])
+ with bz2.BZ2File(filename) as src, open(filename[:-4], "wb") as dst:
+ dst.write(src.read())
except urllib.error.HTTPError:
logging.debug("cache miss, preprocessing from scratch")
if os.path.exists(pkl_file):
| {"golden_diff": "diff --git a/pyro/contrib/examples/bart.py b/pyro/contrib/examples/bart.py\n--- a/pyro/contrib/examples/bart.py\n+++ b/pyro/contrib/examples/bart.py\n@@ -2,6 +2,7 @@\n # SPDX-License-Identifier: Apache-2.0\n \n import argparse\n+import bz2\n import csv\n import datetime\n import logging\n@@ -122,7 +123,8 @@\n try:\n urllib.request.urlretrieve(CACHE_URL, filename)\n logging.debug(\"cache hit, uncompressing\")\n- subprocess.check_call([\"bunzip2\", \"-k\", filename])\n+ with bz2.BZ2File(filename) as src, open(filename[:-4], \"wb\") as dst:\n+ dst.write(src.read())\n except urllib.error.HTTPError:\n logging.debug(\"cache miss, preprocessing from scratch\")\n if os.path.exists(pkl_file):\n", "issue": "Forecasting I tutorial fails on Windows, no bunzip2\nHi,\r\nI have just downloaded the Pytorch 1.4 and Pyro 1.3 to new environment Python 3.7 (Anaconda). The Forecasting I tutorial fails, on line\r\ndataset = load_bart_od()\r\nwith message:\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"D:\\progs2\\anaconda3\\envs\\pyro13\\lib\\site-packages\\pyro\\contrib\\examples\\bart.py\", line 125, in load_bart_od\r\n subprocess.check_call([\"bunzip2\", \"-k\", filename])\r\n File \"D:\\progs2\\anaconda3\\envs\\pyro13\\lib\\subprocess.py\", line 358, in check_call\r\n retcode = call(*popenargs, **kwargs)\r\n File \"D:\\progs2\\anaconda3\\envs\\pyro13\\lib\\subprocess.py\", line 339, in call\r\n with Popen(*popenargs, **kwargs) as p:\r\n File \"D:\\progs2\\anaconda3\\envs\\pyro13\\lib\\subprocess.py\", line 800, in __init__\r\n restore_signals, start_new_session)\r\n File \"D:\\progs2\\anaconda3\\envs\\pyro13\\lib\\subprocess.py\", line 1207, in _execute_child\r\n startupinfo)\r\nFileNotFoundError: [WinError 2] The system cannot find the file specified\r\n\r\nRegards,\r\nSlawek\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport csv\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport urllib\n\nimport torch\n\nfrom pyro.contrib.examples.util import get_data_directory\n\nDATA = get_data_directory(__file__)\n\n# https://www.bart.gov/about/reports/ridership\nSOURCE_DIR = \"http://64.111.127.166/origin-destination/\"\nSOURCE_FILES = [\n \"date-hour-soo-dest-2011.csv.gz\",\n \"date-hour-soo-dest-2012.csv.gz\",\n \"date-hour-soo-dest-2013.csv.gz\",\n \"date-hour-soo-dest-2014.csv.gz\",\n \"date-hour-soo-dest-2015.csv.gz\",\n \"date-hour-soo-dest-2016.csv.gz\",\n \"date-hour-soo-dest-2017.csv.gz\",\n \"date-hour-soo-dest-2018.csv.gz\",\n \"date-hour-soo-dest-2019.csv.gz\",\n]\nCACHE_URL = \"https://d2hg8soec8ck9v.cloudfront.net/datasets/bart_full.pkl.bz2\"\n\n\ndef _mkdir_p(dirname):\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except FileExistsError:\n pass\n\n\ndef _load_hourly_od(basename):\n filename = os.path.join(DATA, basename.replace(\".csv.gz\", \".pkl\"))\n if os.path.exists(filename):\n return filename\n\n # Download source files.\n gz_filename = os.path.join(DATA, basename)\n if not os.path.exists(gz_filename):\n url = SOURCE_DIR + basename\n logging.debug(\"downloading {}\".format(url))\n urllib.request.urlretrieve(url, gz_filename)\n csv_filename = gz_filename[:-3]\n assert csv_filename.endswith(\".csv\")\n if not os.path.exists(csv_filename):\n logging.debug(\"unzipping {}\".format(gz_filename))\n subprocess.check_call([\"gunzip\", \"-k\", gz_filename])\n assert 
os.path.exists(csv_filename)\n\n # Convert to PyTorch.\n logging.debug(\"converting {}\".format(csv_filename))\n start_date = datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\")\n stations = {}\n num_rows = sum(1 for _ in open(csv_filename))\n logging.info(\"Formatting {} rows\".format(num_rows))\n rows = torch.empty((num_rows, 4), dtype=torch.long)\n with open(csv_filename) as f:\n for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n date += datetime.timedelta(hours=int(hour))\n rows[i, 0] = int((date - start_date).total_seconds() / 3600)\n rows[i, 1] = stations.setdefault(origin, len(stations))\n rows[i, 2] = stations.setdefault(destin, len(stations))\n rows[i, 3] = int(trip_count)\n if i % 10000 == 0:\n sys.stderr.write(\".\")\n sys.stderr.flush()\n\n # Save data with metadata.\n dataset = {\n \"basename\": basename,\n \"start_date\": start_date,\n \"stations\": stations,\n \"rows\": rows,\n \"schema\": [\"time_hours\", \"origin\", \"destin\", \"trip_count\"],\n }\n dataset[\"rows\"]\n logging.debug(\"saving {}\".format(filename))\n torch.save(dataset, filename)\n return filename\n\n\ndef load_bart_od():\n \"\"\"\n Load a dataset of hourly origin-destination ridership counts for every pair\n of BART stations during the years 2011-2019.\n\n **Source** https://www.bart.gov/about/reports/ridership\n\n This downloads the dataset the first time it is called. On subsequent calls\n this reads from a local cached file ``.pkl.bz2``. This attempts to\n download a preprocessed compressed cached file maintained by the Pyro team.\n On cache hit this should be very fast. On cache miss this falls back to\n downloading the original data source and preprocessing the dataset,\n requiring about 350MB of file transfer, storing a few GB of temp files, and\n taking upwards of 30 minutes.\n\n :returns: a dataset is a dictionary with fields:\n\n - \"stations\": a list of strings of station names\n - \"start_date\": a :py:class:`datetime.datetime` for the first observaion\n - \"counts\": a ``torch.FloatTensor`` of ridership counts, with shape\n ``(num_hours, len(stations), len(stations))``.\n \"\"\"\n _mkdir_p(DATA)\n filename = os.path.join(DATA, \"bart_full.pkl.bz2\")\n # Work around apparent bug in torch.load(),torch.save().\n pkl_file = filename.rsplit(\".\", 1)[0]\n if not os.path.exists(pkl_file):\n try:\n urllib.request.urlretrieve(CACHE_URL, filename)\n logging.debug(\"cache hit, uncompressing\")\n subprocess.check_call([\"bunzip2\", \"-k\", filename])\n except urllib.error.HTTPError:\n logging.debug(\"cache miss, preprocessing from scratch\")\n if os.path.exists(pkl_file):\n return torch.load(pkl_file)\n\n filenames = multiprocessing.Pool(len(SOURCE_FILES)).map(_load_hourly_od, SOURCE_FILES)\n datasets = list(map(torch.load, filenames))\n\n stations = sorted(set().union(*(d[\"stations\"].keys() for d in datasets)))\n min_time = min(int(d[\"rows\"][:, 0].min()) for d in datasets)\n max_time = max(int(d[\"rows\"][:, 0].max()) for d in datasets)\n num_rows = max_time - min_time + 1\n start_date = datasets[0][\"start_date\"] + datetime.timedelta(hours=min_time),\n logging.info(\"Loaded data from {} stations, {} hours\"\n .format(len(stations), num_rows))\n\n result = torch.zeros(num_rows, len(stations), len(stations))\n for dataset in datasets:\n part_stations = sorted(dataset[\"stations\"], key=dataset[\"stations\"].__getitem__)\n part_to_whole = torch.tensor(list(map(stations.index, part_stations)))\n time = 
dataset[\"rows\"][:, 0] - min_time\n origin = part_to_whole[dataset[\"rows\"][:, 1]]\n destin = part_to_whole[dataset[\"rows\"][:, 2]]\n count = dataset[\"rows\"][:, 3].float()\n result[time, origin, destin] = count\n dataset.clear()\n logging.info(\"Loaded {} shaped data of mean {:0.3g}\"\n .format(result.shape, result.mean()))\n\n dataset = {\n \"stations\": stations,\n \"start_date\": start_date,\n \"counts\": result,\n }\n torch.save(dataset, pkl_file)\n subprocess.check_call([\"bzip2\", \"-k\", pkl_file])\n assert os.path.exists(filename)\n return dataset\n\n\ndef load_fake_od():\n \"\"\"\n Create a tiny synthetic dataset for smoke testing.\n \"\"\"\n dataset = {\n \"stations\": [\"12TH\", \"EMBR\", \"SFIA\"],\n \"start_date\": datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\"),\n \"counts\": torch.distributions.Poisson(100).sample([24 * 7 * 8, 3, 3]),\n }\n return dataset\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"BART data preprocessor\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(relativeCreated) 9d %(message)s',\n level=logging.DEBUG if args.verbose else logging.INFO)\n load_bart_od()\n", "path": "pyro/contrib/examples/bart.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport bz2\nimport csv\nimport datetime\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport sys\nimport urllib\n\nimport torch\n\nfrom pyro.contrib.examples.util import get_data_directory\n\nDATA = get_data_directory(__file__)\n\n# https://www.bart.gov/about/reports/ridership\nSOURCE_DIR = \"http://64.111.127.166/origin-destination/\"\nSOURCE_FILES = [\n \"date-hour-soo-dest-2011.csv.gz\",\n \"date-hour-soo-dest-2012.csv.gz\",\n \"date-hour-soo-dest-2013.csv.gz\",\n \"date-hour-soo-dest-2014.csv.gz\",\n \"date-hour-soo-dest-2015.csv.gz\",\n \"date-hour-soo-dest-2016.csv.gz\",\n \"date-hour-soo-dest-2017.csv.gz\",\n \"date-hour-soo-dest-2018.csv.gz\",\n \"date-hour-soo-dest-2019.csv.gz\",\n]\nCACHE_URL = \"https://d2hg8soec8ck9v.cloudfront.net/datasets/bart_full.pkl.bz2\"\n\n\ndef _mkdir_p(dirname):\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except FileExistsError:\n pass\n\n\ndef _load_hourly_od(basename):\n filename = os.path.join(DATA, basename.replace(\".csv.gz\", \".pkl\"))\n if os.path.exists(filename):\n return filename\n\n # Download source files.\n gz_filename = os.path.join(DATA, basename)\n if not os.path.exists(gz_filename):\n url = SOURCE_DIR + basename\n logging.debug(\"downloading {}\".format(url))\n urllib.request.urlretrieve(url, gz_filename)\n csv_filename = gz_filename[:-3]\n assert csv_filename.endswith(\".csv\")\n if not os.path.exists(csv_filename):\n logging.debug(\"unzipping {}\".format(gz_filename))\n subprocess.check_call([\"gunzip\", \"-k\", gz_filename])\n assert os.path.exists(csv_filename)\n\n # Convert to PyTorch.\n logging.debug(\"converting {}\".format(csv_filename))\n start_date = datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\")\n stations = {}\n num_rows = sum(1 for _ in open(csv_filename))\n logging.info(\"Formatting {} rows\".format(num_rows))\n rows = torch.empty((num_rows, 4), dtype=torch.long)\n with open(csv_filename) as f:\n for i, (date, hour, origin, destin, trip_count) in enumerate(csv.reader(f)):\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n date += 
datetime.timedelta(hours=int(hour))\n rows[i, 0] = int((date - start_date).total_seconds() / 3600)\n rows[i, 1] = stations.setdefault(origin, len(stations))\n rows[i, 2] = stations.setdefault(destin, len(stations))\n rows[i, 3] = int(trip_count)\n if i % 10000 == 0:\n sys.stderr.write(\".\")\n sys.stderr.flush()\n\n # Save data with metadata.\n dataset = {\n \"basename\": basename,\n \"start_date\": start_date,\n \"stations\": stations,\n \"rows\": rows,\n \"schema\": [\"time_hours\", \"origin\", \"destin\", \"trip_count\"],\n }\n dataset[\"rows\"]\n logging.debug(\"saving {}\".format(filename))\n torch.save(dataset, filename)\n return filename\n\n\ndef load_bart_od():\n \"\"\"\n Load a dataset of hourly origin-destination ridership counts for every pair\n of BART stations during the years 2011-2019.\n\n **Source** https://www.bart.gov/about/reports/ridership\n\n This downloads the dataset the first time it is called. On subsequent calls\n this reads from a local cached file ``.pkl.bz2``. This attempts to\n download a preprocessed compressed cached file maintained by the Pyro team.\n On cache hit this should be very fast. On cache miss this falls back to\n downloading the original data source and preprocessing the dataset,\n requiring about 350MB of file transfer, storing a few GB of temp files, and\n taking upwards of 30 minutes.\n\n :returns: a dataset is a dictionary with fields:\n\n - \"stations\": a list of strings of station names\n - \"start_date\": a :py:class:`datetime.datetime` for the first observaion\n - \"counts\": a ``torch.FloatTensor`` of ridership counts, with shape\n ``(num_hours, len(stations), len(stations))``.\n \"\"\"\n _mkdir_p(DATA)\n filename = os.path.join(DATA, \"bart_full.pkl.bz2\")\n # Work around apparent bug in torch.load(),torch.save().\n pkl_file = filename.rsplit(\".\", 1)[0]\n if not os.path.exists(pkl_file):\n try:\n urllib.request.urlretrieve(CACHE_URL, filename)\n logging.debug(\"cache hit, uncompressing\")\n with bz2.BZ2File(filename) as src, open(filename[:-4], \"wb\") as dst:\n dst.write(src.read())\n except urllib.error.HTTPError:\n logging.debug(\"cache miss, preprocessing from scratch\")\n if os.path.exists(pkl_file):\n return torch.load(pkl_file)\n\n filenames = multiprocessing.Pool(len(SOURCE_FILES)).map(_load_hourly_od, SOURCE_FILES)\n datasets = list(map(torch.load, filenames))\n\n stations = sorted(set().union(*(d[\"stations\"].keys() for d in datasets)))\n min_time = min(int(d[\"rows\"][:, 0].min()) for d in datasets)\n max_time = max(int(d[\"rows\"][:, 0].max()) for d in datasets)\n num_rows = max_time - min_time + 1\n start_date = datasets[0][\"start_date\"] + datetime.timedelta(hours=min_time),\n logging.info(\"Loaded data from {} stations, {} hours\"\n .format(len(stations), num_rows))\n\n result = torch.zeros(num_rows, len(stations), len(stations))\n for dataset in datasets:\n part_stations = sorted(dataset[\"stations\"], key=dataset[\"stations\"].__getitem__)\n part_to_whole = torch.tensor(list(map(stations.index, part_stations)))\n time = dataset[\"rows\"][:, 0] - min_time\n origin = part_to_whole[dataset[\"rows\"][:, 1]]\n destin = part_to_whole[dataset[\"rows\"][:, 2]]\n count = dataset[\"rows\"][:, 3].float()\n result[time, origin, destin] = count\n dataset.clear()\n logging.info(\"Loaded {} shaped data of mean {:0.3g}\"\n .format(result.shape, result.mean()))\n\n dataset = {\n \"stations\": stations,\n \"start_date\": start_date,\n \"counts\": result,\n }\n torch.save(dataset, pkl_file)\n subprocess.check_call([\"bzip2\", \"-k\", 
pkl_file])\n assert os.path.exists(filename)\n return dataset\n\n\ndef load_fake_od():\n \"\"\"\n Create a tiny synthetic dataset for smoke testing.\n \"\"\"\n dataset = {\n \"stations\": [\"12TH\", \"EMBR\", \"SFIA\"],\n \"start_date\": datetime.datetime.strptime(\"2000-01-01\", \"%Y-%m-%d\"),\n \"counts\": torch.distributions.Poisson(100).sample([24 * 7 * 8, 3, 3]),\n }\n return dataset\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"BART data preprocessor\")\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n\n logging.basicConfig(format='%(relativeCreated) 9d %(message)s',\n level=logging.DEBUG if args.verbose else logging.INFO)\n load_bart_od()\n", "path": "pyro/contrib/examples/bart.py"}]} | 2,844 | 196 |
gh_patches_debug_30871 | rasdani/github-patches | git_diff | sublimelsp__LSP-1488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LspTextCommand should honor both session_name and capability if defined
If `capability` in [LspTextCommand](https://github.com/sublimelsp/LSP/blob/81a6e6aeb2c3a6aebad59fbd6eb0361301243bd1/plugin/core/registry.py#L52-L70) is defined, `session_name` is ignored. You might say that LSP-* plugins exactly know the capabilities of their server and thus never need to specify `capability` in a derived class, but in particular it's impossible for plugins to derive from LspExecuteCommand (which is derived from LspTextCommand), because that class already comes with a predefined `capability`. It can be convenient for a plugin to declare a derived class from LspExecuteCommand, so that their commands are only shown/enabled for corresponding filetypes:
```python
class FooExecuteCommand(LspExecuteCommand):
session_name = "foo"
```
**Describe the solution you'd like**
```python
def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
if self.capability:
# At least one active session with the given capability must exist.
if not bool(self.best_session(self.capability, get_position(self.view, event, point))):
return False
if self.session_name:
# There must exist an active session with the given (config) name.
if not bool(self.session_by_name(self.session_name)):
return False
if not self.capability and not self.session_name:
# Any session will do.
return any(self.sessions())
return True
```
**Describe alternatives you've considered**
Make `session_name` win against `capability`
**Additional context**
Notice that the implementation suggested above doesn't guarantee that the sessions with the specified name and capability are the same (in case of multiple attached sessions for a view).
--- END ISSUE ---
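
A short usage sketch of what the requested behaviour enables — a plugin command that is only enabled when its named server is attached *and* a session advertising the relevant capability exists. Class, command and server names below are hypothetical, and the import path is assumed from the file location shown in this record:

```python
from LSP.plugin.core.registry import LspTextCommand  # path per plugin/core/registry.py above


class FooFormatDocumentCommand(LspTextCommand):
    # Hypothetical example: with the combined check, both conditions must hold
    # for the command to be enabled, instead of `capability` silently winning.
    session_name = "foo"
    capability = "documentFormattingProvider"

    def run(self, edit, event=None):
        session = self.session_by_name(self.session_name)
        if session:
            # ... send the formatting request to the "foo" server here ...
            pass
```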
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/registry.py`
Content:
```
1 from .configurations import ConfigManager
2 from .sessions import Session
3 from .settings import client_configs
4 from .typing import Optional, Any, Generator, Iterable
5 from .windows import WindowRegistry
6 import sublime
7 import sublime_plugin
8
9
10 def sessions_for_view(view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
11 """
12 Returns all sessions for this view, optionally matching the capability path.
13 """
14 window = view.window()
15 if window:
16 manager = windows.lookup(window)
17 yield from manager.sessions(view, capability)
18
19
20 def best_session(view: sublime.View, sessions: Iterable[Session], point: Optional[int] = None) -> Optional[Session]:
21 if point is None:
22 try:
23 point = view.sel()[0].b
24 except IndexError:
25 return None
26 try:
27 return max(sessions, key=lambda s: view.score_selector(point, s.config.priority_selector)) # type: ignore
28 except ValueError:
29 return None
30
31
32 configs = ConfigManager(client_configs.all)
33 client_configs.set_listener(configs.update)
34 windows = WindowRegistry(configs)
35
36
37 def get_position(view: sublime.View, event: Optional[dict] = None, point: Optional[int] = None) -> int:
38 if isinstance(point, int):
39 return point
40 elif event:
41 return view.window_to_text((event["x"], event["y"]))
42 else:
43 return view.sel()[0].begin()
44
45
46 class LspTextCommand(sublime_plugin.TextCommand):
47 """
48 Inherit from this class to define your requests that should be triggered via the command palette and/or a
49 keybinding.
50 """
51
52 # When this is defined in a derived class, the command is enabled if and only if there exists a session attached
53 # to the view that has the given capability. When both `capability` and `session_name` are defined, `capability`
54 # wins.
55 capability = ''
56
57 # When this is defined in a derived class, the command is enabled if and only if there exists a session attached
58 # to the view that has the given name. When both `capability` and `session_name` are defined, `capability` wins.
59 session_name = ''
60
61 def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
62 if self.capability:
63 # At least one active session with the given capability must exist.
64 return bool(self.best_session(self.capability, get_position(self.view, event, point)))
65 elif self.session_name:
66 # There must exist an active session with the given (config) name.
67 return bool(self.session_by_name(self.session_name))
68 else:
69 # Any session will do.
70 return any(self.sessions())
71
72 def want_event(self) -> bool:
73 return True
74
75 def best_session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:
76 listener = windows.listener_for_view(self.view)
77 return listener.session(capability, point) if listener else None
78
79 def session_by_name(self, name: Optional[str] = None) -> Optional[Session]:
80 target = name if name else self.session_name
81 for session in self.sessions():
82 if session.config.name == target:
83 return session
84 return None
85
86 def sessions(self, capability: Optional[str] = None) -> Generator[Session, None, None]:
87 yield from sessions_for_view(self.view, capability)
88
89
90 class LspRestartClientCommand(sublime_plugin.TextCommand):
91 def run(self, edit: Any) -> None:
92 window = self.view.window()
93 if window:
94 windows.lookup(window).restart_sessions_async()
95
96
97 class LspRecheckSessionsCommand(sublime_plugin.WindowCommand):
98 def run(self) -> None:
99 sublime.set_timeout_async(lambda: windows.lookup(self.window).restart_sessions_async())
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/core/registry.py b/plugin/core/registry.py
--- a/plugin/core/registry.py
+++ b/plugin/core/registry.py
@@ -49,25 +49,27 @@
keybinding.
"""
- # When this is defined in a derived class, the command is enabled if and only if there exists a session attached
- # to the view that has the given capability. When both `capability` and `session_name` are defined, `capability`
- # wins.
+ # When this is defined in a derived class, the command is enabled only if there exists a session attached to the
+ # view that has the given capability.
capability = ''
- # When this is defined in a derived class, the command is enabled if and only if there exists a session attached
- # to the view that has the given name. When both `capability` and `session_name` are defined, `capability` wins.
+ # When this is defined in a derived class, the command is enabled only if there exists a session attached to the
+ # view that has the given name.
session_name = ''
def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
if self.capability:
# At least one active session with the given capability must exist.
- return bool(self.best_session(self.capability, get_position(self.view, event, point)))
- elif self.session_name:
+ if not self.best_session(self.capability, get_position(self.view, event, point)):
+ return False
+ if self.session_name:
# There must exist an active session with the given (config) name.
- return bool(self.session_by_name(self.session_name))
- else:
+ if not self.session_by_name(self.session_name):
+ return False
+ if not self.capability and not self.session_name:
# Any session will do.
return any(self.sessions())
+ return True
def want_event(self) -> bool:
return True
| {"golden_diff": "diff --git a/plugin/core/registry.py b/plugin/core/registry.py\n--- a/plugin/core/registry.py\n+++ b/plugin/core/registry.py\n@@ -49,25 +49,27 @@\n keybinding.\n \"\"\"\n \n- # When this is defined in a derived class, the command is enabled if and only if there exists a session attached\n- # to the view that has the given capability. When both `capability` and `session_name` are defined, `capability`\n- # wins.\n+ # When this is defined in a derived class, the command is enabled only if there exists a session attached to the\n+ # view that has the given capability.\n capability = ''\n \n- # When this is defined in a derived class, the command is enabled if and only if there exists a session attached\n- # to the view that has the given name. When both `capability` and `session_name` are defined, `capability` wins.\n+ # When this is defined in a derived class, the command is enabled only if there exists a session attached to the\n+ # view that has the given name.\n session_name = ''\n \n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n if self.capability:\n # At least one active session with the given capability must exist.\n- return bool(self.best_session(self.capability, get_position(self.view, event, point)))\n- elif self.session_name:\n+ if not self.best_session(self.capability, get_position(self.view, event, point)):\n+ return False\n+ if self.session_name:\n # There must exist an active session with the given (config) name.\n- return bool(self.session_by_name(self.session_name))\n- else:\n+ if not self.session_by_name(self.session_name):\n+ return False\n+ if not self.capability and not self.session_name:\n # Any session will do.\n return any(self.sessions())\n+ return True\n \n def want_event(self) -> bool:\n return True\n", "issue": "LspTextCommand should honor both session_name and capability if defined\nIf `capability` in [LspTextCommand](https://github.com/sublimelsp/LSP/blob/81a6e6aeb2c3a6aebad59fbd6eb0361301243bd1/plugin/core/registry.py#L52-L70) is defined, `session_name` is ignored. You might say that LSP-* plugins exactly know the capabilities of their server and thus never need to specify `capability` in a derived class, but in particular it's impossible for plugins to derive from LspExecuteCommand (which is derived from LspTextCommand), because that class already comes with a predefined `capability`. 
It can be convenient for a plugin to declare a derived class from LspExecuteCommand, so that their commands are only shown/enabled for corresponding filetypes:\r\n```python\r\nclass FooExecuteCommand(LspExecuteCommand):\r\n session_name = \"foo\"\r\n```\r\n\r\n**Describe the solution you'd like**\r\n```python\r\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\r\n if self.capability:\r\n # At least one active session with the given capability must exist.\r\n if not bool(self.best_session(self.capability, get_position(self.view, event, point))):\r\n return False\r\n if self.session_name:\r\n # There must exist an active session with the given (config) name.\r\n if not bool(self.session_by_name(self.session_name)):\r\n return False\r\n if not self.capability and not self.session_name:\r\n # Any session will do.\r\n return any(self.sessions())\r\n return True\r\n```\r\n\r\n**Describe alternatives you've considered**\r\nMake `session_name` win against `capability`\r\n\r\n**Additional context**\r\nNotice that the implementation suggested above doesn't guarantee that the sessions with the specified name and capability are the same (in case of multiple attached sessions for a view).\n", "before_files": [{"content": "from .configurations import ConfigManager\nfrom .sessions import Session\nfrom .settings import client_configs\nfrom .typing import Optional, Any, Generator, Iterable\nfrom .windows import WindowRegistry\nimport sublime\nimport sublime_plugin\n\n\ndef sessions_for_view(view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:\n \"\"\"\n Returns all sessions for this view, optionally matching the capability path.\n \"\"\"\n window = view.window()\n if window:\n manager = windows.lookup(window)\n yield from manager.sessions(view, capability)\n\n\ndef best_session(view: sublime.View, sessions: Iterable[Session], point: Optional[int] = None) -> Optional[Session]:\n if point is None:\n try:\n point = view.sel()[0].b\n except IndexError:\n return None\n try:\n return max(sessions, key=lambda s: view.score_selector(point, s.config.priority_selector)) # type: ignore\n except ValueError:\n return None\n\n\nconfigs = ConfigManager(client_configs.all)\nclient_configs.set_listener(configs.update)\nwindows = WindowRegistry(configs)\n\n\ndef get_position(view: sublime.View, event: Optional[dict] = None, point: Optional[int] = None) -> int:\n if isinstance(point, int):\n return point\n elif event:\n return view.window_to_text((event[\"x\"], event[\"y\"]))\n else:\n return view.sel()[0].begin()\n\n\nclass LspTextCommand(sublime_plugin.TextCommand):\n \"\"\"\n Inherit from this class to define your requests that should be triggered via the command palette and/or a\n keybinding.\n \"\"\"\n\n # When this is defined in a derived class, the command is enabled if and only if there exists a session attached\n # to the view that has the given capability. When both `capability` and `session_name` are defined, `capability`\n # wins.\n capability = ''\n\n # When this is defined in a derived class, the command is enabled if and only if there exists a session attached\n # to the view that has the given name. 
When both `capability` and `session_name` are defined, `capability` wins.\n session_name = ''\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n if self.capability:\n # At least one active session with the given capability must exist.\n return bool(self.best_session(self.capability, get_position(self.view, event, point)))\n elif self.session_name:\n # There must exist an active session with the given (config) name.\n return bool(self.session_by_name(self.session_name))\n else:\n # Any session will do.\n return any(self.sessions())\n\n def want_event(self) -> bool:\n return True\n\n def best_session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:\n listener = windows.listener_for_view(self.view)\n return listener.session(capability, point) if listener else None\n\n def session_by_name(self, name: Optional[str] = None) -> Optional[Session]:\n target = name if name else self.session_name\n for session in self.sessions():\n if session.config.name == target:\n return session\n return None\n\n def sessions(self, capability: Optional[str] = None) -> Generator[Session, None, None]:\n yield from sessions_for_view(self.view, capability)\n\n\nclass LspRestartClientCommand(sublime_plugin.TextCommand):\n def run(self, edit: Any) -> None:\n window = self.view.window()\n if window:\n windows.lookup(window).restart_sessions_async()\n\n\nclass LspRecheckSessionsCommand(sublime_plugin.WindowCommand):\n def run(self) -> None:\n sublime.set_timeout_async(lambda: windows.lookup(self.window).restart_sessions_async())\n", "path": "plugin/core/registry.py"}], "after_files": [{"content": "from .configurations import ConfigManager\nfrom .sessions import Session\nfrom .settings import client_configs\nfrom .typing import Optional, Any, Generator, Iterable\nfrom .windows import WindowRegistry\nimport sublime\nimport sublime_plugin\n\n\ndef sessions_for_view(view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:\n \"\"\"\n Returns all sessions for this view, optionally matching the capability path.\n \"\"\"\n window = view.window()\n if window:\n manager = windows.lookup(window)\n yield from manager.sessions(view, capability)\n\n\ndef best_session(view: sublime.View, sessions: Iterable[Session], point: Optional[int] = None) -> Optional[Session]:\n if point is None:\n try:\n point = view.sel()[0].b\n except IndexError:\n return None\n try:\n return max(sessions, key=lambda s: view.score_selector(point, s.config.priority_selector)) # type: ignore\n except ValueError:\n return None\n\n\nconfigs = ConfigManager(client_configs.all)\nclient_configs.set_listener(configs.update)\nwindows = WindowRegistry(configs)\n\n\ndef get_position(view: sublime.View, event: Optional[dict] = None, point: Optional[int] = None) -> int:\n if isinstance(point, int):\n return point\n elif event:\n return view.window_to_text((event[\"x\"], event[\"y\"]))\n else:\n return view.sel()[0].begin()\n\n\nclass LspTextCommand(sublime_plugin.TextCommand):\n \"\"\"\n Inherit from this class to define your requests that should be triggered via the command palette and/or a\n keybinding.\n \"\"\"\n\n # When this is defined in a derived class, the command is enabled only if there exists a session attached to the\n # view that has the given capability.\n capability = ''\n\n # When this is defined in a derived class, the command is enabled only if there exists a session attached to the\n # view that has the given name.\n session_name = ''\n\n def 
is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n if self.capability:\n # At least one active session with the given capability must exist.\n if not self.best_session(self.capability, get_position(self.view, event, point)):\n return False\n if self.session_name:\n # There must exist an active session with the given (config) name.\n if not self.session_by_name(self.session_name):\n return False\n if not self.capability and not self.session_name:\n # Any session will do.\n return any(self.sessions())\n return True\n\n def want_event(self) -> bool:\n return True\n\n def best_session(self, capability: str, point: Optional[int] = None) -> Optional[Session]:\n listener = windows.listener_for_view(self.view)\n return listener.session(capability, point) if listener else None\n\n def session_by_name(self, name: Optional[str] = None) -> Optional[Session]:\n target = name if name else self.session_name\n for session in self.sessions():\n if session.config.name == target:\n return session\n return None\n\n def sessions(self, capability: Optional[str] = None) -> Generator[Session, None, None]:\n yield from sessions_for_view(self.view, capability)\n\n\nclass LspRestartClientCommand(sublime_plugin.TextCommand):\n def run(self, edit: Any) -> None:\n window = self.view.window()\n if window:\n windows.lookup(window).restart_sessions_async()\n\n\nclass LspRecheckSessionsCommand(sublime_plugin.WindowCommand):\n def run(self) -> None:\n sublime.set_timeout_async(lambda: windows.lookup(self.window).restart_sessions_async())\n", "path": "plugin/core/registry.py"}]} | 1,692 | 449 |
gh_patches_debug_3419 | rasdani/github-patches | git_diff | encode__httpx-71 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
100% Test coverage
Let's get the test coverage up to 100%, and then force-pin it.
Any contributions towards this are welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpcore/concurrency.py`
Content:
```
1 """
2 The `Reader` and `Writer` classes here provide a lightweight layer over
3 `asyncio.StreamReader` and `asyncio.StreamWriter`.
4
5 Similarly `PoolSemaphore` is a lightweight layer over `BoundedSemaphore`.
6
7 These classes help encapsulate the timeout logic, make it easier to unit-test
8 protocols, and help keep the rest of the package more `async`/`await`
9 based, and less strictly `asyncio`-specific.
10 """
11 import asyncio
12 import ssl
13 import typing
14
15 from .config import DEFAULT_TIMEOUT_CONFIG, PoolLimits, TimeoutConfig
16 from .exceptions import ConnectTimeout, PoolTimeout, ReadTimeout, WriteTimeout
17 from .interfaces import (
18 BasePoolSemaphore,
19 BaseReader,
20 BaseWriter,
21 ConcurrencyBackend,
22 Protocol,
23 )
24
25 OptionalTimeout = typing.Optional[TimeoutConfig]
26
27
28 SSL_MONKEY_PATCH_APPLIED = False
29
30
31 def ssl_monkey_patch() -> None:
32 """
33 Monky-patch for https://bugs.python.org/issue36709
34
35 This prevents console errors when outstanding HTTPS connections
36 still exist at the point of exiting.
37
38 Clients which have been opened using a `with` block, or which have
39 had `close()` closed, will not exhibit this issue in the first place.
40 """
41 MonkeyPatch = asyncio.selector_events._SelectorSocketTransport # type: ignore
42
43 _write = MonkeyPatch.write
44
45 def _fixed_write(self, data: bytes) -> None: # type: ignore
46 if not self._loop.is_closed():
47 _write(self, data)
48
49 MonkeyPatch.write = _fixed_write
50
51
52 class Reader(BaseReader):
53 def __init__(
54 self, stream_reader: asyncio.StreamReader, timeout: TimeoutConfig
55 ) -> None:
56 self.stream_reader = stream_reader
57 self.timeout = timeout
58
59 async def read(self, n: int, timeout: OptionalTimeout = None) -> bytes:
60 if timeout is None:
61 timeout = self.timeout
62
63 try:
64 data = await asyncio.wait_for(
65 self.stream_reader.read(n), timeout.read_timeout
66 )
67 except asyncio.TimeoutError:
68 raise ReadTimeout()
69
70 return data
71
72
73 class Writer(BaseWriter):
74 def __init__(self, stream_writer: asyncio.StreamWriter, timeout: TimeoutConfig):
75 self.stream_writer = stream_writer
76 self.timeout = timeout
77
78 def write_no_block(self, data: bytes) -> None:
79 self.stream_writer.write(data)
80
81 async def write(self, data: bytes, timeout: OptionalTimeout = None) -> None:
82 if not data:
83 return
84
85 if timeout is None:
86 timeout = self.timeout
87
88 self.stream_writer.write(data)
89 try:
90 await asyncio.wait_for( # type: ignore
91 self.stream_writer.drain(), timeout.write_timeout
92 )
93 except asyncio.TimeoutError:
94 raise WriteTimeout()
95
96 async def close(self) -> None:
97 self.stream_writer.close()
98
99
100 class PoolSemaphore(BasePoolSemaphore):
101 def __init__(self, pool_limits: PoolLimits):
102 self.pool_limits = pool_limits
103
104 @property
105 def semaphore(self) -> typing.Optional[asyncio.BoundedSemaphore]:
106 if not hasattr(self, "_semaphore"):
107 max_connections = self.pool_limits.hard_limit
108 if max_connections is None:
109 self._semaphore = None
110 else:
111 self._semaphore = asyncio.BoundedSemaphore(value=max_connections)
112 return self._semaphore
113
114 async def acquire(self) -> None:
115 if self.semaphore is None:
116 return
117
118 timeout = self.pool_limits.pool_timeout
119 try:
120 await asyncio.wait_for(self.semaphore.acquire(), timeout)
121 except asyncio.TimeoutError:
122 raise PoolTimeout()
123
124 def release(self) -> None:
125 if self.semaphore is None:
126 return
127
128 self.semaphore.release()
129
130
131 class AsyncioBackend(ConcurrencyBackend):
132 def __init__(self) -> None:
133 global SSL_MONKEY_PATCH_APPLIED
134
135 if not SSL_MONKEY_PATCH_APPLIED:
136 ssl_monkey_patch()
137 SSL_MONKEY_PATCH_APPLIED = True
138
139 async def connect(
140 self,
141 hostname: str,
142 port: int,
143 ssl_context: typing.Optional[ssl.SSLContext],
144 timeout: TimeoutConfig,
145 ) -> typing.Tuple[BaseReader, BaseWriter, Protocol]:
146 try:
147 stream_reader, stream_writer = await asyncio.wait_for( # type: ignore
148 asyncio.open_connection(hostname, port, ssl=ssl_context),
149 timeout.connect_timeout,
150 )
151 except asyncio.TimeoutError:
152 raise ConnectTimeout()
153
154 ssl_object = stream_writer.get_extra_info("ssl_object")
155 if ssl_object is None:
156 ident = "http/1.1"
157 else:
158 ident = ssl_object.selected_alpn_protocol()
159 if ident is None:
160 ident = ssl_object.selected_npn_protocol()
161
162 reader = Reader(stream_reader=stream_reader, timeout=timeout)
163 writer = Writer(stream_writer=stream_writer, timeout=timeout)
164 protocol = Protocol.HTTP_2 if ident == "h2" else Protocol.HTTP_11
165
166 return (reader, writer, protocol)
167
168 def get_semaphore(self, limits: PoolLimits) -> BasePoolSemaphore:
169 return PoolSemaphore(limits)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpcore/concurrency.py b/httpcore/concurrency.py
--- a/httpcore/concurrency.py
+++ b/httpcore/concurrency.py
@@ -76,7 +76,7 @@
self.timeout = timeout
def write_no_block(self, data: bytes) -> None:
- self.stream_writer.write(data)
+ self.stream_writer.write(data) # pragma: nocover
async def write(self, data: bytes, timeout: OptionalTimeout = None) -> None:
if not data:
| {"golden_diff": "diff --git a/httpcore/concurrency.py b/httpcore/concurrency.py\n--- a/httpcore/concurrency.py\n+++ b/httpcore/concurrency.py\n@@ -76,7 +76,7 @@\n self.timeout = timeout\n \n def write_no_block(self, data: bytes) -> None:\n- self.stream_writer.write(data)\n+ self.stream_writer.write(data) # pragma: nocover\n \n async def write(self, data: bytes, timeout: OptionalTimeout = None) -> None:\n if not data:\n", "issue": "100% Test coverage\nLet's get the test coverage up to 100%, and then force-pin it.\r\n\r\nAny contributions towards this are welcome.\n", "before_files": [{"content": "\"\"\"\nThe `Reader` and `Writer` classes here provide a lightweight layer over\n`asyncio.StreamReader` and `asyncio.StreamWriter`.\n\nSimilarly `PoolSemaphore` is a lightweight layer over `BoundedSemaphore`.\n\nThese classes help encapsulate the timeout logic, make it easier to unit-test\nprotocols, and help keep the rest of the package more `async`/`await`\nbased, and less strictly `asyncio`-specific.\n\"\"\"\nimport asyncio\nimport ssl\nimport typing\n\nfrom .config import DEFAULT_TIMEOUT_CONFIG, PoolLimits, TimeoutConfig\nfrom .exceptions import ConnectTimeout, PoolTimeout, ReadTimeout, WriteTimeout\nfrom .interfaces import (\n BasePoolSemaphore,\n BaseReader,\n BaseWriter,\n ConcurrencyBackend,\n Protocol,\n)\n\nOptionalTimeout = typing.Optional[TimeoutConfig]\n\n\nSSL_MONKEY_PATCH_APPLIED = False\n\n\ndef ssl_monkey_patch() -> None:\n \"\"\"\n Monky-patch for https://bugs.python.org/issue36709\n\n This prevents console errors when outstanding HTTPS connections\n still exist at the point of exiting.\n\n Clients which have been opened using a `with` block, or which have\n had `close()` closed, will not exhibit this issue in the first place.\n \"\"\"\n MonkeyPatch = asyncio.selector_events._SelectorSocketTransport # type: ignore\n\n _write = MonkeyPatch.write\n\n def _fixed_write(self, data: bytes) -> None: # type: ignore\n if not self._loop.is_closed():\n _write(self, data)\n\n MonkeyPatch.write = _fixed_write\n\n\nclass Reader(BaseReader):\n def __init__(\n self, stream_reader: asyncio.StreamReader, timeout: TimeoutConfig\n ) -> None:\n self.stream_reader = stream_reader\n self.timeout = timeout\n\n async def read(self, n: int, timeout: OptionalTimeout = None) -> bytes:\n if timeout is None:\n timeout = self.timeout\n\n try:\n data = await asyncio.wait_for(\n self.stream_reader.read(n), timeout.read_timeout\n )\n except asyncio.TimeoutError:\n raise ReadTimeout()\n\n return data\n\n\nclass Writer(BaseWriter):\n def __init__(self, stream_writer: asyncio.StreamWriter, timeout: TimeoutConfig):\n self.stream_writer = stream_writer\n self.timeout = timeout\n\n def write_no_block(self, data: bytes) -> None:\n self.stream_writer.write(data)\n\n async def write(self, data: bytes, timeout: OptionalTimeout = None) -> None:\n if not data:\n return\n\n if timeout is None:\n timeout = self.timeout\n\n self.stream_writer.write(data)\n try:\n await asyncio.wait_for( # type: ignore\n self.stream_writer.drain(), timeout.write_timeout\n )\n except asyncio.TimeoutError:\n raise WriteTimeout()\n\n async def close(self) -> None:\n self.stream_writer.close()\n\n\nclass PoolSemaphore(BasePoolSemaphore):\n def __init__(self, pool_limits: PoolLimits):\n self.pool_limits = pool_limits\n\n @property\n def semaphore(self) -> typing.Optional[asyncio.BoundedSemaphore]:\n if not hasattr(self, \"_semaphore\"):\n max_connections = self.pool_limits.hard_limit\n if max_connections is None:\n self._semaphore = None\n else:\n 
self._semaphore = asyncio.BoundedSemaphore(value=max_connections)\n return self._semaphore\n\n async def acquire(self) -> None:\n if self.semaphore is None:\n return\n\n timeout = self.pool_limits.pool_timeout\n try:\n await asyncio.wait_for(self.semaphore.acquire(), timeout)\n except asyncio.TimeoutError:\n raise PoolTimeout()\n\n def release(self) -> None:\n if self.semaphore is None:\n return\n\n self.semaphore.release()\n\n\nclass AsyncioBackend(ConcurrencyBackend):\n def __init__(self) -> None:\n global SSL_MONKEY_PATCH_APPLIED\n\n if not SSL_MONKEY_PATCH_APPLIED:\n ssl_monkey_patch()\n SSL_MONKEY_PATCH_APPLIED = True\n\n async def connect(\n self,\n hostname: str,\n port: int,\n ssl_context: typing.Optional[ssl.SSLContext],\n timeout: TimeoutConfig,\n ) -> typing.Tuple[BaseReader, BaseWriter, Protocol]:\n try:\n stream_reader, stream_writer = await asyncio.wait_for( # type: ignore\n asyncio.open_connection(hostname, port, ssl=ssl_context),\n timeout.connect_timeout,\n )\n except asyncio.TimeoutError:\n raise ConnectTimeout()\n\n ssl_object = stream_writer.get_extra_info(\"ssl_object\")\n if ssl_object is None:\n ident = \"http/1.1\"\n else:\n ident = ssl_object.selected_alpn_protocol()\n if ident is None:\n ident = ssl_object.selected_npn_protocol()\n\n reader = Reader(stream_reader=stream_reader, timeout=timeout)\n writer = Writer(stream_writer=stream_writer, timeout=timeout)\n protocol = Protocol.HTTP_2 if ident == \"h2\" else Protocol.HTTP_11\n\n return (reader, writer, protocol)\n\n def get_semaphore(self, limits: PoolLimits) -> BasePoolSemaphore:\n return PoolSemaphore(limits)\n", "path": "httpcore/concurrency.py"}], "after_files": [{"content": "\"\"\"\nThe `Reader` and `Writer` classes here provide a lightweight layer over\n`asyncio.StreamReader` and `asyncio.StreamWriter`.\n\nSimilarly `PoolSemaphore` is a lightweight layer over `BoundedSemaphore`.\n\nThese classes help encapsulate the timeout logic, make it easier to unit-test\nprotocols, and help keep the rest of the package more `async`/`await`\nbased, and less strictly `asyncio`-specific.\n\"\"\"\nimport asyncio\nimport ssl\nimport typing\n\nfrom .config import DEFAULT_TIMEOUT_CONFIG, PoolLimits, TimeoutConfig\nfrom .exceptions import ConnectTimeout, PoolTimeout, ReadTimeout, WriteTimeout\nfrom .interfaces import (\n BasePoolSemaphore,\n BaseReader,\n BaseWriter,\n ConcurrencyBackend,\n Protocol,\n)\n\nOptionalTimeout = typing.Optional[TimeoutConfig]\n\n\nSSL_MONKEY_PATCH_APPLIED = False\n\n\ndef ssl_monkey_patch() -> None:\n \"\"\"\n Monky-patch for https://bugs.python.org/issue36709\n\n This prevents console errors when outstanding HTTPS connections\n still exist at the point of exiting.\n\n Clients which have been opened using a `with` block, or which have\n had `close()` closed, will not exhibit this issue in the first place.\n \"\"\"\n MonkeyPatch = asyncio.selector_events._SelectorSocketTransport # type: ignore\n\n _write = MonkeyPatch.write\n\n def _fixed_write(self, data: bytes) -> None: # type: ignore\n if not self._loop.is_closed():\n _write(self, data)\n\n MonkeyPatch.write = _fixed_write\n\n\nclass Reader(BaseReader):\n def __init__(\n self, stream_reader: asyncio.StreamReader, timeout: TimeoutConfig\n ) -> None:\n self.stream_reader = stream_reader\n self.timeout = timeout\n\n async def read(self, n: int, timeout: OptionalTimeout = None) -> bytes:\n if timeout is None:\n timeout = self.timeout\n\n try:\n data = await asyncio.wait_for(\n self.stream_reader.read(n), timeout.read_timeout\n )\n except 
asyncio.TimeoutError:\n raise ReadTimeout()\n\n return data\n\n\nclass Writer(BaseWriter):\n def __init__(self, stream_writer: asyncio.StreamWriter, timeout: TimeoutConfig):\n self.stream_writer = stream_writer\n self.timeout = timeout\n\n def write_no_block(self, data: bytes) -> None:\n self.stream_writer.write(data) # pragma: nocover\n\n async def write(self, data: bytes, timeout: OptionalTimeout = None) -> None:\n if not data:\n return\n\n if timeout is None:\n timeout = self.timeout\n\n self.stream_writer.write(data)\n try:\n await asyncio.wait_for( # type: ignore\n self.stream_writer.drain(), timeout.write_timeout\n )\n except asyncio.TimeoutError:\n raise WriteTimeout()\n\n async def close(self) -> None:\n self.stream_writer.close()\n\n\nclass PoolSemaphore(BasePoolSemaphore):\n def __init__(self, pool_limits: PoolLimits):\n self.pool_limits = pool_limits\n\n @property\n def semaphore(self) -> typing.Optional[asyncio.BoundedSemaphore]:\n if not hasattr(self, \"_semaphore\"):\n max_connections = self.pool_limits.hard_limit\n if max_connections is None:\n self._semaphore = None\n else:\n self._semaphore = asyncio.BoundedSemaphore(value=max_connections)\n return self._semaphore\n\n async def acquire(self) -> None:\n if self.semaphore is None:\n return\n\n timeout = self.pool_limits.pool_timeout\n try:\n await asyncio.wait_for(self.semaphore.acquire(), timeout)\n except asyncio.TimeoutError:\n raise PoolTimeout()\n\n def release(self) -> None:\n if self.semaphore is None:\n return\n\n self.semaphore.release()\n\n\nclass AsyncioBackend(ConcurrencyBackend):\n def __init__(self) -> None:\n global SSL_MONKEY_PATCH_APPLIED\n\n if not SSL_MONKEY_PATCH_APPLIED:\n ssl_monkey_patch()\n SSL_MONKEY_PATCH_APPLIED = True\n\n async def connect(\n self,\n hostname: str,\n port: int,\n ssl_context: typing.Optional[ssl.SSLContext],\n timeout: TimeoutConfig,\n ) -> typing.Tuple[BaseReader, BaseWriter, Protocol]:\n try:\n stream_reader, stream_writer = await asyncio.wait_for( # type: ignore\n asyncio.open_connection(hostname, port, ssl=ssl_context),\n timeout.connect_timeout,\n )\n except asyncio.TimeoutError:\n raise ConnectTimeout()\n\n ssl_object = stream_writer.get_extra_info(\"ssl_object\")\n if ssl_object is None:\n ident = \"http/1.1\"\n else:\n ident = ssl_object.selected_alpn_protocol()\n if ident is None:\n ident = ssl_object.selected_npn_protocol()\n\n reader = Reader(stream_reader=stream_reader, timeout=timeout)\n writer = Writer(stream_writer=stream_writer, timeout=timeout)\n protocol = Protocol.HTTP_2 if ident == \"h2\" else Protocol.HTTP_11\n\n return (reader, writer, protocol)\n\n def get_semaphore(self, limits: PoolLimits) -> BasePoolSemaphore:\n return PoolSemaphore(limits)\n", "path": "httpcore/concurrency.py"}]} | 1,823 | 112 |
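The golden diff for this record reaches the 100 % goal by excluding the one untestable delegation line from the report rather than adding an artificial test (the diff spells it `# pragma: nocover`; the sketch below uses the more common `# pragma: no cover`). The names here are placeholders, not httpcore's real API. Force-pinning the threshold, as the issue asks, is then typically a coverage setting such as `fail_under = 100` under `[coverage:report]`, or `pytest --cov --cov-fail-under=100` with pytest-cov.

```python
import io


def write_no_block(stream, data: bytes) -> None:
    # Thin pass-through that the unit tests never hit directly; the trailing
    # pragma removes it from the coverage report instead of forcing a test.
    stream.write(data)  # pragma: no cover


if __name__ == "__main__":
    buf = io.BytesIO()
    write_no_block(buf, b"ping")
    print(buf.getvalue())  # b'ping'
```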
gh_patches_debug_37183 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-2995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scheduled-query create example throws BadRequest error
I tried playing around with the scheduled-query CLI commands and kept running into issues parsing the LogAnalytics query we were attempting to use in the condition. So I simply tried using the condition provided in the examples, and it had similar issues.
```
C:\Users\XXXXX>az monitor scheduled-query create -g <RG> -n Another-Alert --scopes <RESOUCE ID> --description "This is a description" --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName=="Error" or SeverityLevel=="err"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points"
BadRequest: {
"error": {
"message": "The request had some invalid properties",
"code": "BadArgumentError",
"correlationId": "fb91e6e3-85e8-4a34-a2c3-c5c76468f26a",
"innererror": {
"code": "SemanticError",
"message": "A semantic error occurred.",
"innererror": {
"code": "SEM0100",
"message": "'where' operator: Failed to resolve column or scalar expression named 'Error'"
}
}
}
```
Any help or guidance would be greatly appreciated...
---
#### Document Details
⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.*
* ID: 445d2458-1e27-7cf4-8a04-95ec798ebdc1
* Version Independent ID: 3f80ed4a-d3c4-d15c-3e5d-50a48e0eb72c
* Content: [az monitor scheduled-query](https://docs.microsoft.com/en-us/cli/azure/ext/scheduled-query/monitor/scheduled-query?view=azure-cli-latest)
* Content Source: [latest/docs-ref-autogen/ext/scheduled-query/monitor/scheduled-query.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/scheduled-query/monitor/scheduled-query.yml)
* GitHub Login: @rloutlaw
* Microsoft Alias: **routlaw**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5 # pylint: disable=all
6 from .ScheduleQueryConditionListener import ScheduleQueryConditionListener
7 from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import ConditionFailingPeriods
8
9
10 op_conversion = {
11 '=': 'Equals',
12 '!=': 'NotEquals',
13 '>': 'GreaterThan',
14 '>=': 'GreaterThanOrEqual',
15 '<': 'LessThan',
16 '<=': 'LessThanOrEqual'
17 }
18
19 agg_conversion = {
20 'avg': 'Average',
21 'min': 'Minimum',
22 'max': 'Maximum',
23 'total': 'Total',
24 'count': 'Count'
25 }
26
27 dim_op_conversion = {
28 'includes': 'Include',
29 'excludes': 'Exclude'
30 }
31
32 # This class defines a complete listener for a parse tree produced by MetricAlertConditionParser.
33 class ScheduleQueryConditionValidator(ScheduleQueryConditionListener):
34
35 def __init__(self):
36 super(ScheduleQueryConditionValidator, self).__init__()
37 self.parameters = {}
38 self._dimension_index = 0
39
40 # Exit a parse tree produced by MetricAlertConditionParser#aggregation.
41 def exitAggregation(self, ctx):
42 aggregation = agg_conversion[ctx.getText().strip()]
43 self.parameters['time_aggregation'] = aggregation
44
45 # Exit a parse tree produced by MetricAlertConditionParser#metric.
46 def exitMetric(self, ctx):
47 self.parameters['metric_measure_column'] = ctx.getText().strip()
48
49 # Exit a parse tree produced by MetricAlertConditionParser#operator.
50 def exitOperator(self, ctx):
51 operator = op_conversion[ctx.getText().strip()]
52 self.parameters['operator'] = operator
53
54 # Exit a parse tree produced by MetricAlertConditionParser#threshold.
55 def exitThreshold(self, ctx):
56 self.parameters['threshold'] = ctx.getText().strip()
57
58 # Exit a parse tree produced by MetricAlertConditionParser#threshold.
59 def exitQuery(self, ctx):
60 self.parameters['query'] = ctx.getText().strip()
61
62 # Exit a parse tree produced by MetricAlertConditionParser#threshold.
63 def exitResource_id(self, ctx):
64 self.parameters['resource_id_column'] = ctx.getText().strip()
65
66 # Enter a parse tree produced by MetricAlertConditionParser#dimensions.
67 def enterFalling_period(self, ctx):
68 self.parameters['failing_periods'] = ConditionFailingPeriods()
69
70 # Exit a parse tree produced by MetricAlertConditionParser#threshold.
71 def exitMin_times(self, ctx):
72 self.parameters['failing_periods'].min_failing_periods_to_alert = int(float(ctx.getText().strip()))
73
74 # Exit a parse tree produced by MetricAlertConditionParser#threshold.
75 def exitEvaluation_period(self, ctx):
76 self.parameters['failing_periods'].number_of_evaluation_periods = int(float(ctx.getText().strip()))
77
78 # Enter a parse tree produced by MetricAlertConditionParser#dimensions.
79 def enterDimensions(self, ctx):
80 self.parameters['dimensions'] = []
81
82 # Enter a parse tree produced by MetricAlertConditionParser#dimension.
83 def enterDimension(self, ctx):
84 self.parameters['dimensions'].append({})
85
86 # Exit a parse tree produced by MetricAlertConditionParser#dimension.
87 def exitDimension(self, ctx):
88 self._dimension_index = self._dimension_index + 1
89
90 # Exit a parse tree produced by MetricAlertConditionParser#dname.
91 def exitDim_name(self, ctx):
92 self.parameters['dimensions'][self._dimension_index]['name'] = ctx.getText().strip()
93
94 # Exit a parse tree produced by MetricAlertConditionParser#dop.
95 def exitDim_operator(self, ctx):
96 op_text = ctx.getText().strip()
97 self.parameters['dimensions'][self._dimension_index]['operator'] = dim_op_conversion[op_text.lower()]
98
99 # Exit a parse tree produced by MetricAlertConditionParser#dvalues.
100 def exitDim_values(self, ctx):
101 dvalues = ctx.getText().strip().split(' ')
102 self.parameters['dimensions'][self._dimension_index]['values'] = [x for x in dvalues if x not in ['', 'or']]
103
104 def result(self):
105 from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import Condition, Dimension
106 dim_params = self.parameters.get('dimensions', [])
107 dimensions = []
108 for dim in dim_params:
109 dimensions.append(Dimension(**dim))
110 self.parameters['dimensions'] = dimensions
111 return Condition(**self.parameters)
112
```
Path: `src/scheduled-query/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.2.1'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 3',
29 'Programming Language :: Python :: 3.6',
30 'Programming Language :: Python :: 3.7',
31 'Programming Language :: Python :: 3.8',
32 'License :: OSI Approved :: MIT License',
33 ]
34
35 with open('README.rst', 'r', encoding='utf-8') as f:
36 README = f.read()
37 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
38 HISTORY = f.read()
39
40 setup(
41 name='scheduled-query',
42 version=VERSION,
43 description='Microsoft Azure Command-Line Tools Scheduled_query Extension',
44 # TODO: Update author and email, if applicable
45 author='Microsoft Corporation',
46 author_email='[email protected]',
47 # TODO: consider pointing directly to your source code instead of the generic repo
48 url='https://github.com/Azure/azure-cli-extensions',
49 long_description=README + '\n\n' + HISTORY,
50 license='MIT',
51 classifiers=CLASSIFIERS,
52 packages=find_packages(),
53 package_data={'azext_scheduled_query': ['azext_metadata.json']},
54 )
55
```
Path: `src/scheduled-query/azext_scheduled_query/_help.py`
Content:
```
1 # coding=utf-8
2 # --------------------------------------------------------------------------------------------
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See License.txt in the project root for license information.
5 # --------------------------------------------------------------------------------------------
6
7 from knack.help_files import helps # pylint: disable=unused-import
8
9
10 helps['monitor scheduled-query'] = """
11 type: group
12 short-summary: Commands to manage scheduled queries.
13 """
14
15 helps['monitor scheduled-query create'] = """
16 type: command
17 short-summary: Create a scheduled query.
18 parameters:
19 - name: --action -a
20 short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.
21 long-summary: |
22 Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]
23
24 Multiple action groups can be specified by using more than one `--action` argument.
25 - name: --condition
26 short-summary: The condition which triggers the rule.
27 long-summary: |
28 Usage: --condition {avg,min,max,total,count} ["METRIC COLUMN" from]
29 "QUERY" {=,!=,>,>=,<,<=} THRESHOLD
30 [resource id RESOURCEID]
31 [where DIMENSION {includes,excludes} VALUE [or VALUE ...]
32 [and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]
33 [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'
34
35 Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.
36 examples:
37 - name: Create a scheduled query for a VM.
38 text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\"Error\" or SeverityLevel==\"err\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
39 - name: Create a scheduled query for VMs in a resource group.
40 text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\"Error\" or SeverityLevel==\"err\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
41 """
42
43 helps['monitor scheduled-query update'] = """
44 type: command
45 short-summary: Update a scheduled query.
46 parameters:
47 - name: --action -a
48 short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.
49 long-summary: |
50 Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]
51
52 Multiple action groups can be specified by using more than one `--action` argument.
53 - name: --condition
54 short-summary: The condition which triggers the rule.
55 long-summary: |
56 Usage: --condition {avg,min,max,total,count} ["METRIC COLUMN" from]
57 "QUERY" {=,!=,>,>=,<,<=} THRESHOLD
58 [resource id RESOURCEID]
59 [where DIMENSION {includes,excludes} VALUE [or VALUE ...]
60 [and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]
61 [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'
62
63 Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.
64 """
65
66 helps['monitor scheduled-query list'] = """
67 type: command
68 short-summary: List all scheduled queries.
69 """
70
71 helps['monitor scheduled-query show'] = """
72 type: command
73 short-summary: Show detail of a scheduled query.
74 """
75
76 helps['monitor scheduled-query delete'] = """
77 type: command
78 short-summary: Delete a scheduled query.
79 """
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/scheduled-query/azext_scheduled_query/_help.py b/src/scheduled-query/azext_scheduled_query/_help.py
--- a/src/scheduled-query/azext_scheduled_query/_help.py
+++ b/src/scheduled-query/azext_scheduled_query/_help.py
@@ -35,9 +35,9 @@
Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.
examples:
- name: Create a scheduled query for a VM.
- text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\"Error\" or SeverityLevel==\"err\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
+ text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\'Error\\' or SeverityLevel==\\'err\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
- name: Create a scheduled query for VMs in a resource group.
- text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\"Error\" or SeverityLevel==\"err\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
+ text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition "count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\'Error\\' or SeverityLevel==\\'err\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points" --description "Test rule"
"""
helps['monitor scheduled-query update'] = """
diff --git a/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py b/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py
--- a/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py
+++ b/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py
@@ -57,7 +57,10 @@
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitQuery(self, ctx):
- self.parameters['query'] = ctx.getText().strip()
+ query = ctx.getText().strip()
+ query = query.replace("\\\"", "\"")
+ query = query.replace("\\\'", "\'")
+ self.parameters['query'] = query
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitResource_id(self, ctx):
diff --git a/src/scheduled-query/setup.py b/src/scheduled-query/setup.py
--- a/src/scheduled-query/setup.py
+++ b/src/scheduled-query/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.2.1'
+VERSION = '0.2.2'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
| {"golden_diff": "diff --git a/src/scheduled-query/azext_scheduled_query/_help.py b/src/scheduled-query/azext_scheduled_query/_help.py\n--- a/src/scheduled-query/azext_scheduled_query/_help.py\n+++ b/src/scheduled-query/azext_scheduled_query/_help.py\n@@ -35,9 +35,9 @@\n Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.\n examples:\n - name: Create a scheduled query for a VM.\n- text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\"Error\\\" or SeverityLevel==\\\"err\\\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n+ text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\\'Error\\\\' or SeverityLevel==\\\\'err\\\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n - name: Create a scheduled query for VMs in a resource group.\n- text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\"Error\\\" or SeverityLevel==\\\"err\\\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n+ text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\\'Error\\\\' or SeverityLevel==\\\\'err\\\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n \"\"\"\n \n helps['monitor scheduled-query update'] = \"\"\"\ndiff --git a/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py b/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py\n--- a/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py\n+++ b/src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py\n@@ -57,7 +57,10 @@\n \n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitQuery(self, ctx):\n- self.parameters['query'] = ctx.getText().strip()\n+ query = ctx.getText().strip()\n+ query = query.replace(\"\\\\\\\"\", \"\\\"\")\n+ query = query.replace(\"\\\\\\'\", \"\\'\")\n+ self.parameters['query'] = query\n \n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitResource_id(self, ctx):\ndiff --git a/src/scheduled-query/setup.py b/src/scheduled-query/setup.py\n--- a/src/scheduled-query/setup.py\n+++ b/src/scheduled-query/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.2.1'\n+VERSION = '0.2.2'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Scheduled-query create example throws BadRequest error\nI tried playing around the scheduled-query cli commands and kept running into issues parsing the LogAnalytics query we were attempting to use in the condition. 
So, I simply tried using the condition provided in the examples and it had similar issues.\r\n\r\n```\r\nC:\\Users\\XXXXX>az monitor scheduled-query create -g <RG> -n Another-Alert --scopes <RESOUCE ID> --description \"This is a description\" --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\"Error\" or SeverityLevel==\"err\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\"\r\nBadRequest: {\r\n \"error\": {\r\n \"message\": \"The request had some invalid properties\",\r\n \"code\": \"BadArgumentError\",\r\n \"correlationId\": \"fb91e6e3-85e8-4a34-a2c3-c5c76468f26a\",\r\n \"innererror\": {\r\n \"code\": \"SemanticError\",\r\n \"message\": \"A semantic error occurred.\",\r\n \"innererror\": {\r\n \"code\": \"SEM0100\",\r\n \"message\": \"'where' operator: Failed to resolve column or scalar expression named 'Error'\"\r\n }\r\n }\r\n }\r\n```\r\n\r\nAny help or guidance would be greatly appreciated...\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: 445d2458-1e27-7cf4-8a04-95ec798ebdc1\r\n* Version Independent ID: 3f80ed4a-d3c4-d15c-3e5d-50a48e0eb72c\r\n* Content: [az monitor scheduled-query](https://docs.microsoft.com/en-us/cli/azure/ext/scheduled-query/monitor/scheduled-query?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/scheduled-query/monitor/scheduled-query.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/scheduled-query/monitor/scheduled-query.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=all\nfrom .ScheduleQueryConditionListener import ScheduleQueryConditionListener\nfrom azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import ConditionFailingPeriods\n\n\nop_conversion = {\n '=': 'Equals',\n '!=': 'NotEquals',\n '>': 'GreaterThan',\n '>=': 'GreaterThanOrEqual',\n '<': 'LessThan',\n '<=': 'LessThanOrEqual'\n}\n\nagg_conversion = {\n 'avg': 'Average',\n 'min': 'Minimum',\n 'max': 'Maximum',\n 'total': 'Total',\n 'count': 'Count'\n}\n\ndim_op_conversion = {\n 'includes': 'Include',\n 'excludes': 'Exclude'\n}\n\n# This class defines a complete listener for a parse tree produced by MetricAlertConditionParser.\nclass ScheduleQueryConditionValidator(ScheduleQueryConditionListener):\n\n def __init__(self):\n super(ScheduleQueryConditionValidator, self).__init__()\n self.parameters = {}\n self._dimension_index = 0\n\n # Exit a parse tree produced by MetricAlertConditionParser#aggregation.\n def exitAggregation(self, ctx):\n aggregation = agg_conversion[ctx.getText().strip()]\n self.parameters['time_aggregation'] = aggregation\n\n # Exit a parse tree produced by MetricAlertConditionParser#metric.\n def exitMetric(self, ctx):\n self.parameters['metric_measure_column'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#operator.\n def exitOperator(self, ctx):\n operator = op_conversion[ctx.getText().strip()]\n self.parameters['operator'] = operator\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitThreshold(self, ctx):\n self.parameters['threshold'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitQuery(self, ctx):\n self.parameters['query'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitResource_id(self, ctx):\n self.parameters['resource_id_column'] = ctx.getText().strip()\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimensions.\n def enterFalling_period(self, ctx):\n self.parameters['failing_periods'] = ConditionFailingPeriods()\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitMin_times(self, ctx):\n self.parameters['failing_periods'].min_failing_periods_to_alert = int(float(ctx.getText().strip()))\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitEvaluation_period(self, ctx):\n self.parameters['failing_periods'].number_of_evaluation_periods = int(float(ctx.getText().strip()))\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimensions.\n def enterDimensions(self, ctx):\n self.parameters['dimensions'] = []\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimension.\n def enterDimension(self, ctx):\n self.parameters['dimensions'].append({})\n\n # Exit a parse tree produced by MetricAlertConditionParser#dimension.\n def exitDimension(self, ctx):\n self._dimension_index = self._dimension_index + 1\n\n # Exit a parse tree produced by MetricAlertConditionParser#dname.\n def exitDim_name(self, ctx):\n self.parameters['dimensions'][self._dimension_index]['name'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#dop.\n def exitDim_operator(self, ctx):\n op_text = ctx.getText().strip()\n self.parameters['dimensions'][self._dimension_index]['operator'] = 
dim_op_conversion[op_text.lower()]\n\n # Exit a parse tree produced by MetricAlertConditionParser#dvalues.\n def exitDim_values(self, ctx):\n dvalues = ctx.getText().strip().split(' ')\n self.parameters['dimensions'][self._dimension_index]['values'] = [x for x in dvalues if x not in ['', 'or']]\n\n def result(self):\n from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import Condition, Dimension\n dim_params = self.parameters.get('dimensions', [])\n dimensions = []\n for dim in dim_params:\n dimensions.append(Dimension(**dim))\n self.parameters['dimensions'] = dimensions\n return Condition(**self.parameters)\n", "path": "src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.2.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='scheduled-query',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools Scheduled_query Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n # TODO: consider pointing directly to your source code instead of the generic repo\n url='https://github.com/Azure/azure-cli-extensions',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n package_data={'azext_scheduled_query': ['azext_metadata.json']},\n)\n", "path": "src/scheduled-query/setup.py"}, {"content": "# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps # pylint: disable=unused-import\n\n\nhelps['monitor scheduled-query'] = \"\"\"\n type: group\n short-summary: Commands to manage scheduled queries.\n\"\"\"\n\nhelps['monitor scheduled-query create'] = \"\"\"\ntype: command\nshort-summary: Create a scheduled query.\nparameters:\n - name: --action -a\n short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.\n long-summary: |\n Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]\n\n Multiple action groups can be specified by using more than one `--action` argument.\n - name: --condition\n short-summary: The condition which triggers the rule.\n long-summary: |\n Usage: --condition {avg,min,max,total,count} [\"METRIC COLUMN\" from]\n \"QUERY\" {=,!=,>,>=,<,<=} THRESHOLD\n [resource id RESOURCEID]\n [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n [and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]\n [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'\n\n Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.\nexamples:\n - name: Create a scheduled query for a VM.\n text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\"Error\\\" or SeverityLevel==\\\"err\\\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n - name: Create a scheduled query for VMs in a resource group.\n text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\"Error\\\" or SeverityLevel==\\\"err\\\"' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n\"\"\"\n\nhelps['monitor scheduled-query update'] = \"\"\"\ntype: command\nshort-summary: Update a scheduled query.\nparameters:\n - name: --action -a\n short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.\n long-summary: |\n Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]\n\n Multiple action groups can be specified by using more than one `--action` argument.\n - name: --condition\n short-summary: The condition which triggers the rule.\n long-summary: |\n Usage: --condition {avg,min,max,total,count} [\"METRIC COLUMN\" from]\n \"QUERY\" {=,!=,>,>=,<,<=} THRESHOLD\n [resource id RESOURCEID]\n [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n [and DIMENSION {includes,excludes} VALUE [or VALUE ...] 
...]]\n [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'\n\n Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.\n\"\"\"\n\nhelps['monitor scheduled-query list'] = \"\"\"\n type: command\n short-summary: List all scheduled queries.\n\"\"\"\n\nhelps['monitor scheduled-query show'] = \"\"\"\n type: command\n short-summary: Show detail of a scheduled query.\n\"\"\"\n\nhelps['monitor scheduled-query delete'] = \"\"\"\n type: command\n short-summary: Delete a scheduled query.\n\"\"\"\n", "path": "src/scheduled-query/azext_scheduled_query/_help.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=all\nfrom .ScheduleQueryConditionListener import ScheduleQueryConditionListener\nfrom azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import ConditionFailingPeriods\n\n\nop_conversion = {\n '=': 'Equals',\n '!=': 'NotEquals',\n '>': 'GreaterThan',\n '>=': 'GreaterThanOrEqual',\n '<': 'LessThan',\n '<=': 'LessThanOrEqual'\n}\n\nagg_conversion = {\n 'avg': 'Average',\n 'min': 'Minimum',\n 'max': 'Maximum',\n 'total': 'Total',\n 'count': 'Count'\n}\n\ndim_op_conversion = {\n 'includes': 'Include',\n 'excludes': 'Exclude'\n}\n\n# This class defines a complete listener for a parse tree produced by MetricAlertConditionParser.\nclass ScheduleQueryConditionValidator(ScheduleQueryConditionListener):\n\n def __init__(self):\n super(ScheduleQueryConditionValidator, self).__init__()\n self.parameters = {}\n self._dimension_index = 0\n\n # Exit a parse tree produced by MetricAlertConditionParser#aggregation.\n def exitAggregation(self, ctx):\n aggregation = agg_conversion[ctx.getText().strip()]\n self.parameters['time_aggregation'] = aggregation\n\n # Exit a parse tree produced by MetricAlertConditionParser#metric.\n def exitMetric(self, ctx):\n self.parameters['metric_measure_column'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#operator.\n def exitOperator(self, ctx):\n operator = op_conversion[ctx.getText().strip()]\n self.parameters['operator'] = operator\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitThreshold(self, ctx):\n self.parameters['threshold'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitQuery(self, ctx):\n query = ctx.getText().strip()\n query = query.replace(\"\\\\\\\"\", \"\\\"\")\n query = query.replace(\"\\\\\\'\", \"\\'\")\n self.parameters['query'] = query\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitResource_id(self, ctx):\n self.parameters['resource_id_column'] = ctx.getText().strip()\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimensions.\n def enterFalling_period(self, ctx):\n self.parameters['failing_periods'] = ConditionFailingPeriods()\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n def exitMin_times(self, ctx):\n self.parameters['failing_periods'].min_failing_periods_to_alert = int(float(ctx.getText().strip()))\n\n # Exit a parse tree produced by MetricAlertConditionParser#threshold.\n 
def exitEvaluation_period(self, ctx):\n self.parameters['failing_periods'].number_of_evaluation_periods = int(float(ctx.getText().strip()))\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimensions.\n def enterDimensions(self, ctx):\n self.parameters['dimensions'] = []\n\n # Enter a parse tree produced by MetricAlertConditionParser#dimension.\n def enterDimension(self, ctx):\n self.parameters['dimensions'].append({})\n\n # Exit a parse tree produced by MetricAlertConditionParser#dimension.\n def exitDimension(self, ctx):\n self._dimension_index = self._dimension_index + 1\n\n # Exit a parse tree produced by MetricAlertConditionParser#dname.\n def exitDim_name(self, ctx):\n self.parameters['dimensions'][self._dimension_index]['name'] = ctx.getText().strip()\n\n # Exit a parse tree produced by MetricAlertConditionParser#dop.\n def exitDim_operator(self, ctx):\n op_text = ctx.getText().strip()\n self.parameters['dimensions'][self._dimension_index]['operator'] = dim_op_conversion[op_text.lower()]\n\n # Exit a parse tree produced by MetricAlertConditionParser#dvalues.\n def exitDim_values(self, ctx):\n dvalues = ctx.getText().strip().split(' ')\n self.parameters['dimensions'][self._dimension_index]['values'] = [x for x in dvalues if x not in ['', 'or']]\n\n def result(self):\n from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import Condition, Dimension\n dim_params = self.parameters.get('dimensions', [])\n dimensions = []\n for dim in dim_params:\n dimensions.append(Dimension(**dim))\n self.parameters['dimensions'] = dimensions\n return Condition(**self.parameters)\n", "path": "src/scheduled-query/azext_scheduled_query/grammar/scheduled_query/ScheduleQueryConditionValidator.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.2.2'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='scheduled-query',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools Scheduled_query Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n # TODO: consider pointing directly to your source code instead of the generic repo\n url='https://github.com/Azure/azure-cli-extensions',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n package_data={'azext_scheduled_query': ['azext_metadata.json']},\n)\n", "path": "src/scheduled-query/setup.py"}, {"content": "# coding=utf-8\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps # pylint: disable=unused-import\n\n\nhelps['monitor scheduled-query'] = \"\"\"\n type: group\n short-summary: Commands to manage scheduled queries.\n\"\"\"\n\nhelps['monitor scheduled-query create'] = \"\"\"\ntype: command\nshort-summary: Create a scheduled query.\nparameters:\n - name: --action -a\n short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.\n long-summary: |\n Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]\n\n Multiple action groups can be specified by using more than one `--action` argument.\n - name: --condition\n short-summary: The condition which triggers the rule.\n long-summary: |\n Usage: --condition {avg,min,max,total,count} [\"METRIC COLUMN\" from]\n \"QUERY\" {=,!=,>,>=,<,<=} THRESHOLD\n [resource id RESOURCEID]\n [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n [and DIMENSION {includes,excludes} VALUE [or VALUE ...] 
...]]\n [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'\n\n Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.\nexamples:\n - name: Create a scheduled query for a VM.\n text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {vm_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\\'Error\\\\' or SeverityLevel==\\\\'err\\\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n - name: Create a scheduled query for VMs in a resource group.\n text: az monitor scheduled-query create -g {rg} -n {name1} --scopes {rg_id} --condition \"count 'union Event, Syslog | where TimeGenerated > ago(1h) | where EventLevelName==\\\\'Error\\\\' or SeverityLevel==\\\\'err\\\\'' > 360 resource id _ResourceID at least 1 violations out of 5 aggregated points\" --description \"Test rule\"\n\"\"\"\n\nhelps['monitor scheduled-query update'] = \"\"\"\ntype: command\nshort-summary: Update a scheduled query.\nparameters:\n - name: --action -a\n short-summary: Add an action group and optional webhook properties to fire when the alert is triggered.\n long-summary: |\n Usage: --action ACTION_GROUP_NAME_OR_ID [KEY=VAL [KEY=VAL ...]]\n\n Multiple action groups can be specified by using more than one `--action` argument.\n - name: --condition\n short-summary: The condition which triggers the rule.\n long-summary: |\n Usage: --condition {avg,min,max,total,count} [\"METRIC COLUMN\" from]\n \"QUERY\" {=,!=,>,>=,<,<=} THRESHOLD\n [resource id RESOURCEID]\n [where DIMENSION {includes,excludes} VALUE [or VALUE ...]\n [and DIMENSION {includes,excludes} VALUE [or VALUE ...] ...]]\n [at least MinTimeToFail violations out of EvaluationPeriod aggregated points]'\n\n Dimensions can be queried by adding the 'where' keyword and multiple dimensions can be queried by combining them with the 'and' keyword.\n\"\"\"\n\nhelps['monitor scheduled-query list'] = \"\"\"\n type: command\n short-summary: List all scheduled queries.\n\"\"\"\n\nhelps['monitor scheduled-query show'] = \"\"\"\n type: command\n short-summary: Show detail of a scheduled query.\n\"\"\"\n\nhelps['monitor scheduled-query delete'] = \"\"\"\n type: command\n short-summary: Delete a scheduled query.\n\"\"\"\n", "path": "src/scheduled-query/azext_scheduled_query/_help.py"}]} | 3,600 | 846 |
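The patch for this record works on both ends of the problem: the help examples switch to escaped single quotes so the shell no longer strips the inner quotes, and `exitQuery` folds the surviving backslash escapes back into plain quotes before the KQL query is sent to the service. A minimal sketch of just that un-escaping step, with the ANTLR listener and parser context omitted:

```python
# Standalone sketch of the un-escaping added to exitQuery above; the ANTLR
# listener/context plumbing is omitted.
def unescape_condition_query(raw: str) -> str:
    query = raw.strip()
    # Backslash-escaped quotes survive shell parsing and reach the grammar
    # verbatim, so they are folded back into plain quotes before the KQL
    # query is handed to the scheduled-query API.
    query = query.replace('\\"', '"')
    query = query.replace("\\'", "'")
    return query


if __name__ == "__main__":
    raw = r"union Event, Syslog | where EventLevelName==\'Error\' or SeverityLevel==\'err\'"
    print(unescape_condition_query(raw))
    # union Event, Syslog | where EventLevelName=='Error' or SeverityLevel=='err'
```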
gh_patches_debug_40374 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-4432 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In chargeback rate assignment - 'Add' button
In the chargeback rate assignment form, when the form is filled in and we are about to press the 'Add' (assignment) button, the Add button of the row is pressed instead:
Reproduction branch: https://github.com/gshefer/integration_tests/tree/chargeback_reports
Reproduction file: cfme/tests/containers/test_chargeback.py

--- END ISSUE ---
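For reference, here is a minimal, self-contained illustration of the ambiguity described above: an alt-only locator matches both the row-level and the form-level "Add" button, while an extra class predicate (for example `btn-primary`) narrows it to the intended one. The markup and class names below are assumptions made only for illustration, and the snippet assumes `lxml` is installed:
```
from lxml import html

# hypothetical page fragment: a per-row "Add" image inside a table,
# plus the form-level "Add" button below it
page = html.fromstring("""
<div>
  <table><tr><td><img alt="Add" class="button row-add"/></td></tr></table>
  <img alt="Add" class="button btn-primary"/>
</div>
""")

alt_only = "//img[normalize-space(@alt)='Add' and contains(@class, 'button')]"
with_class = alt_only[:-1] + " and contains(@class, 'btn-primary')]"

print(len(page.xpath(alt_only)))    # 2 -> ambiguous, the row button can win
print(len(page.xpath(with_class)))  # 1 -> only the form-level button matches
```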
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cfme/web_ui/form_buttons.py`
Content:
```
1 """This module unifies working with CRUD form buttons.
2
3 Whenever you use Add, Save, Cancel, Reset button, use this module.
4 You can use it also for the other buttons with same shape like those CRUD ones.
5 """
6 from selenium.common.exceptions import NoSuchElementException
7 from xml.sax.saxutils import quoteattr
8
9 from cfme.fixtures import pytest_selenium as sel
10 from cfme.web_ui import fill
11 from utils import version
12 from utils.log import logger
13 from utils.pretty import Pretty
14
15
16 class FormButton(Pretty):
17 """This class represents the buttons usually located in forms or CRUD.
18
19 Args:
20 alt: The text from ``alt`` field of the image.
21 dimmed_alt: In case the ``alt`` param is different in the dimmed variant of the button.
22 force_click: Click always, even if it is dimmed. (Causes an error if not visible)
23 partial_alt: Whether the alt matching should be only partial (``in``).
24 ng_click: To match the angular buttons, you can use this to specify the contents of
25 ``ng-click`` attributeh.
26 """
27 pretty_attrs = ['_alt', '_dimmed_alt', '_force', '_partial', '_ng_click']
28
29 class Button:
30 """Holds pieces of the XPath to be assembled."""
31 TAG_TYPES = "//a | //button | //img | //input"
32 TYPE_CONDITION = (
33 "(contains(@class, 'button') or contains(@class, 'btn') or contains(@src, 'button'))"
34 )
35 DIMMED = "(contains(@class, 'dimmed') " \
36 "or contains(@class, 'disabled') " \
37 "or contains(@class, 'btn-disabled'))"
38 NOT_DIMMED = "not{}".format(DIMMED)
39 IS_DISPLAYED = (
40 "not(ancestor::*[contains(@style, 'display:none') "
41 "or contains(@style, 'display: none')])")
42 ON_CURRENT_TAB = (
43 "not(ancestor::div[contains(@class, 'tab-pane') and not(contains(@class, 'active'))])")
44
45 def __init__(self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None):
46 self._alt = alt
47 self._dimmed_alt = dimmed_alt
48 self._force = force_click
49 self._partial = partial_alt
50 self._ng_click = ng_click
51
52 def alt_expr(self, dimmed=False):
53 if self._partial:
54 if self._ng_click is None:
55 return (
56 "(contains(normalize-space(@alt), {alt}) or "
57 "contains(normalize-space(text()), {alt}))".format(
58 alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))
59 else:
60 return (
61 "(contains(normalize-space(@alt), {alt}) or "
62 "@ng-click={click} or "
63 "contains(normalize-space(text()), {alt}))".format(
64 alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),
65 click=quoteattr(self._ng_click)))
66 else:
67 if self._ng_click is None:
68 return (
69 "(normalize-space(@alt)={alt} or "
70 "normalize-space(text())={alt})".format(
71 alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))
72 else:
73 return (
74 "(normalize-space(@alt)={alt} or "
75 "@ng-click={click} or "
76 "normalize-space(text())={alt})".format(
77 alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),
78 click=quoteattr(self._ng_click)))
79
80 def _format_generator(self, dimmed=False, include_dimmed_alt=False):
81 """Generates a dict that will be passed to the formatting strings."""
82 d = {}
83 for key, value in self.Button.__dict__.iteritems():
84 if not key.startswith("_"):
85 d[key] = value
86 d["ALT_EXPR"] = self.alt_expr(dimmed=dimmed)
87 if include_dimmed_alt:
88 d["DIMMED_ALT"] = quoteattr(self._dimmed_alt or self._alt)
89 return d
90
91 def locate(self):
92 return (
93 "({TAG_TYPES})[{ALT_EXPR} and {NOT_DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} "
94 "and {ON_CURRENT_TAB}]"
95 .format(**self._format_generator(dimmed=False)))
96
97 @property
98 def is_dimmed(self):
99 locator = (
100 "({TAG_TYPES})[{ALT_EXPR} and {DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} "
101 "and {ON_CURRENT_TAB}]"
102 "|" # A bit different type of a button
103 "({TAG_TYPES})[normalize-space(.)={DIMMED_ALT} and {IS_DISPLAYED} and "
104 "(@disabled='true' or contains(@class, 'btn-disabled')) and {ON_CURRENT_TAB}]"
105 .format(**self._format_generator(dimmed=True, include_dimmed_alt=True)))
106 return sel.is_displayed(locator)
107
108 @property
109 def can_be_clicked(self):
110 """Whether the button is displayed, therefore clickable."""
111 try:
112 return sel.is_displayed(self, move_to=True)
113 except NoSuchElementException:
114 return False
115
116 def __call__(self, *args, **kwargs):
117 """For maintaining backward compatibility"""
118 sel.click(self)
119
120 def _custom_click_handler(self, wait_ajax):
121 """Handler called from pytest_selenium"""
122 if self.is_dimmed and not self._force:
123 logger.error("Could not click %s because it was dimmed", repr(self))
124 return
125 sel.wait_for_element(self, timeout=5)
126 return sel.click(self, no_custom_handler=True, wait_ajax=wait_ajax)
127
128 def __str__(self):
129 return self.locate()
130
131 def __repr__(self):
132 return "{}({})".format(type(self).__name__, str(repr(self._alt)))
133
134
135 add = FormButton("Add")
136 save = FormButton("Save Changes", dimmed_alt="Save", ng_click="saveClicked()")
137 angular_save = FormButton("Save changes", ng_click="saveClicked()")
138 cancel = FormButton("Cancel")
139 cancel_changes = FormButton("Cancel Changes")
140 submit = FormButton("Submit")
141 reset = FormButton("Reset Changes", dimmed_alt="Reset")
142 validate = FormButton("Validate the credentials by logging into the Server", dimmed_alt="Validate")
143 validate_short = FormButton("Validate the credentials")
144 validate_multi_host = FormButton("Validate the credentials by logging into the selected Host")
145 host_provision_submit = FormButton("Submit this provisioning request")
146 host_provision_cancel = FormButton("Cancel this provisioning request")
147 retrieve = FormButton("LDAP Group Lookup")
148
149
150 _stored_pw_script = '//a[contains(@id, "change_stored_password")]'
151 _stored_pw_angular = "//a[@ng-hide='bChangeStoredPassword']"
152
153
154 def change_stored_password():
155 if version.current_version() > '5.5':
156 if sel.is_displayed(_stored_pw_script):
157 sel.execute_script(
158 sel.get_attribute(
159 sel.element(_stored_pw_script), 'onClick'))
160 sel.wait_for_ajax() # To play safe
161 elif sel.is_displayed(_stored_pw_angular):
162 sel.click(_stored_pw_angular)
163 else:
164 logger.info("Probably no creds")
165
166
167 @fill.method((FormButton, bool))
168 def _fill_fb_bool(fb, b):
169 if b:
170 sel.click(fb)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cfme/web_ui/form_buttons.py b/cfme/web_ui/form_buttons.py
--- a/cfme/web_ui/form_buttons.py
+++ b/cfme/web_ui/form_buttons.py
@@ -6,6 +6,8 @@
from selenium.common.exceptions import NoSuchElementException
from xml.sax.saxutils import quoteattr
+from widgetastic.xpath import quote
+
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import fill
from utils import version
@@ -26,6 +28,8 @@
"""
pretty_attrs = ['_alt', '_dimmed_alt', '_force', '_partial', '_ng_click']
+ PRIMARY = 'btn-primary'
+
class Button:
"""Holds pieces of the XPath to be assembled."""
TAG_TYPES = "//a | //button | //img | //input"
@@ -42,12 +46,15 @@
ON_CURRENT_TAB = (
"not(ancestor::div[contains(@class, 'tab-pane') and not(contains(@class, 'active'))])")
- def __init__(self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None):
+ def __init__(
+ self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None,
+ classes=None):
self._alt = alt
self._dimmed_alt = dimmed_alt
self._force = force_click
self._partial = partial_alt
self._ng_click = ng_click
+ self._classes = classes or []
def alt_expr(self, dimmed=False):
if self._partial:
@@ -86,19 +93,24 @@
d["ALT_EXPR"] = self.alt_expr(dimmed=dimmed)
if include_dimmed_alt:
d["DIMMED_ALT"] = quoteattr(self._dimmed_alt or self._alt)
+ if self._classes:
+ d['CLASSES'] = 'and ({})'.format(
+ ' and '.join('contains(@class, {})'.format(quote(kls)) for kls in self._classes))
+ else:
+ d['CLASSES'] = ''
return d
def locate(self):
return (
"({TAG_TYPES})[{ALT_EXPR} and {NOT_DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} "
- "and {ON_CURRENT_TAB}]"
+ "and {ON_CURRENT_TAB} {CLASSES}]"
.format(**self._format_generator(dimmed=False)))
@property
def is_dimmed(self):
locator = (
"({TAG_TYPES})[{ALT_EXPR} and {DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} "
- "and {ON_CURRENT_TAB}]"
+ "and {ON_CURRENT_TAB} {CLASSES}]"
"|" # A bit different type of a button
"({TAG_TYPES})[normalize-space(.)={DIMMED_ALT} and {IS_DISPLAYED} and "
"(@disabled='true' or contains(@class, 'btn-disabled')) and {ON_CURRENT_TAB}]"
| {"golden_diff": "diff --git a/cfme/web_ui/form_buttons.py b/cfme/web_ui/form_buttons.py\n--- a/cfme/web_ui/form_buttons.py\n+++ b/cfme/web_ui/form_buttons.py\n@@ -6,6 +6,8 @@\n from selenium.common.exceptions import NoSuchElementException\n from xml.sax.saxutils import quoteattr\n \n+from widgetastic.xpath import quote\n+\n from cfme.fixtures import pytest_selenium as sel\n from cfme.web_ui import fill\n from utils import version\n@@ -26,6 +28,8 @@\n \"\"\"\n pretty_attrs = ['_alt', '_dimmed_alt', '_force', '_partial', '_ng_click']\n \n+ PRIMARY = 'btn-primary'\n+\n class Button:\n \"\"\"Holds pieces of the XPath to be assembled.\"\"\"\n TAG_TYPES = \"//a | //button | //img | //input\"\n@@ -42,12 +46,15 @@\n ON_CURRENT_TAB = (\n \"not(ancestor::div[contains(@class, 'tab-pane') and not(contains(@class, 'active'))])\")\n \n- def __init__(self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None):\n+ def __init__(\n+ self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None,\n+ classes=None):\n self._alt = alt\n self._dimmed_alt = dimmed_alt\n self._force = force_click\n self._partial = partial_alt\n self._ng_click = ng_click\n+ self._classes = classes or []\n \n def alt_expr(self, dimmed=False):\n if self._partial:\n@@ -86,19 +93,24 @@\n d[\"ALT_EXPR\"] = self.alt_expr(dimmed=dimmed)\n if include_dimmed_alt:\n d[\"DIMMED_ALT\"] = quoteattr(self._dimmed_alt or self._alt)\n+ if self._classes:\n+ d['CLASSES'] = 'and ({})'.format(\n+ ' and '.join('contains(@class, {})'.format(quote(kls)) for kls in self._classes))\n+ else:\n+ d['CLASSES'] = ''\n return d\n \n def locate(self):\n return (\n \"({TAG_TYPES})[{ALT_EXPR} and {NOT_DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n- \"and {ON_CURRENT_TAB}]\"\n+ \"and {ON_CURRENT_TAB} {CLASSES}]\"\n .format(**self._format_generator(dimmed=False)))\n \n @property\n def is_dimmed(self):\n locator = (\n \"({TAG_TYPES})[{ALT_EXPR} and {DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n- \"and {ON_CURRENT_TAB}]\"\n+ \"and {ON_CURRENT_TAB} {CLASSES}]\"\n \"|\" # A bit different type of a button\n \"({TAG_TYPES})[normalize-space(.)={DIMMED_ALT} and {IS_DISPLAYED} and \"\n \"(@disabled='true' or contains(@class, 'btn-disabled')) and {ON_CURRENT_TAB}]\"\n", "issue": "In chargeback rate assignment - 'Add' button\nIn chargeback rate assignment form, when the form is filled and we are about to press the 'Add' (assignment) button, the Add button of the row is pressed instead:\r\nReproduction branch: https://github.com/gshefer/integration_tests/tree/chargeback_reports\r\nReproduction file: cfme/tests/containers/test_chargeback.py\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"This module unifies working with CRUD form buttons.\n\nWhenever you use Add, Save, Cancel, Reset button, use this module.\nYou can use it also for the other buttons with same shape like those CRUD ones.\n\"\"\"\nfrom selenium.common.exceptions import NoSuchElementException\nfrom xml.sax.saxutils import quoteattr\n\nfrom cfme.fixtures import pytest_selenium as sel\nfrom cfme.web_ui import fill\nfrom utils import version\nfrom utils.log import logger\nfrom utils.pretty import Pretty\n\n\nclass FormButton(Pretty):\n \"\"\"This class represents the buttons usually located in forms or CRUD.\n\n Args:\n alt: The text from ``alt`` field of the image.\n dimmed_alt: In case the ``alt`` param is different in the dimmed variant of the button.\n force_click: Click always, even if it is dimmed. 
(Causes an error if not visible)\n partial_alt: Whether the alt matching should be only partial (``in``).\n ng_click: To match the angular buttons, you can use this to specify the contents of\n ``ng-click`` attributeh.\n \"\"\"\n pretty_attrs = ['_alt', '_dimmed_alt', '_force', '_partial', '_ng_click']\n\n class Button:\n \"\"\"Holds pieces of the XPath to be assembled.\"\"\"\n TAG_TYPES = \"//a | //button | //img | //input\"\n TYPE_CONDITION = (\n \"(contains(@class, 'button') or contains(@class, 'btn') or contains(@src, 'button'))\"\n )\n DIMMED = \"(contains(@class, 'dimmed') \" \\\n \"or contains(@class, 'disabled') \" \\\n \"or contains(@class, 'btn-disabled'))\"\n NOT_DIMMED = \"not{}\".format(DIMMED)\n IS_DISPLAYED = (\n \"not(ancestor::*[contains(@style, 'display:none') \"\n \"or contains(@style, 'display: none')])\")\n ON_CURRENT_TAB = (\n \"not(ancestor::div[contains(@class, 'tab-pane') and not(contains(@class, 'active'))])\")\n\n def __init__(self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None):\n self._alt = alt\n self._dimmed_alt = dimmed_alt\n self._force = force_click\n self._partial = partial_alt\n self._ng_click = ng_click\n\n def alt_expr(self, dimmed=False):\n if self._partial:\n if self._ng_click is None:\n return (\n \"(contains(normalize-space(@alt), {alt}) or \"\n \"contains(normalize-space(text()), {alt}))\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))\n else:\n return (\n \"(contains(normalize-space(@alt), {alt}) or \"\n \"@ng-click={click} or \"\n \"contains(normalize-space(text()), {alt}))\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),\n click=quoteattr(self._ng_click)))\n else:\n if self._ng_click is None:\n return (\n \"(normalize-space(@alt)={alt} or \"\n \"normalize-space(text())={alt})\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))\n else:\n return (\n \"(normalize-space(@alt)={alt} or \"\n \"@ng-click={click} or \"\n \"normalize-space(text())={alt})\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),\n click=quoteattr(self._ng_click)))\n\n def _format_generator(self, dimmed=False, include_dimmed_alt=False):\n \"\"\"Generates a dict that will be passed to the formatting strings.\"\"\"\n d = {}\n for key, value in self.Button.__dict__.iteritems():\n if not key.startswith(\"_\"):\n d[key] = value\n d[\"ALT_EXPR\"] = self.alt_expr(dimmed=dimmed)\n if include_dimmed_alt:\n d[\"DIMMED_ALT\"] = quoteattr(self._dimmed_alt or self._alt)\n return d\n\n def locate(self):\n return (\n \"({TAG_TYPES})[{ALT_EXPR} and {NOT_DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n \"and {ON_CURRENT_TAB}]\"\n .format(**self._format_generator(dimmed=False)))\n\n @property\n def is_dimmed(self):\n locator = (\n \"({TAG_TYPES})[{ALT_EXPR} and {DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n \"and {ON_CURRENT_TAB}]\"\n \"|\" # A bit different type of a button\n \"({TAG_TYPES})[normalize-space(.)={DIMMED_ALT} and {IS_DISPLAYED} and \"\n \"(@disabled='true' or contains(@class, 'btn-disabled')) and {ON_CURRENT_TAB}]\"\n .format(**self._format_generator(dimmed=True, include_dimmed_alt=True)))\n return sel.is_displayed(locator)\n\n @property\n def can_be_clicked(self):\n \"\"\"Whether the button is displayed, therefore clickable.\"\"\"\n try:\n return sel.is_displayed(self, move_to=True)\n except NoSuchElementException:\n return False\n\n def __call__(self, *args, **kwargs):\n \"\"\"For maintaining backward 
compatibility\"\"\"\n sel.click(self)\n\n def _custom_click_handler(self, wait_ajax):\n \"\"\"Handler called from pytest_selenium\"\"\"\n if self.is_dimmed and not self._force:\n logger.error(\"Could not click %s because it was dimmed\", repr(self))\n return\n sel.wait_for_element(self, timeout=5)\n return sel.click(self, no_custom_handler=True, wait_ajax=wait_ajax)\n\n def __str__(self):\n return self.locate()\n\n def __repr__(self):\n return \"{}({})\".format(type(self).__name__, str(repr(self._alt)))\n\n\nadd = FormButton(\"Add\")\nsave = FormButton(\"Save Changes\", dimmed_alt=\"Save\", ng_click=\"saveClicked()\")\nangular_save = FormButton(\"Save changes\", ng_click=\"saveClicked()\")\ncancel = FormButton(\"Cancel\")\ncancel_changes = FormButton(\"Cancel Changes\")\nsubmit = FormButton(\"Submit\")\nreset = FormButton(\"Reset Changes\", dimmed_alt=\"Reset\")\nvalidate = FormButton(\"Validate the credentials by logging into the Server\", dimmed_alt=\"Validate\")\nvalidate_short = FormButton(\"Validate the credentials\")\nvalidate_multi_host = FormButton(\"Validate the credentials by logging into the selected Host\")\nhost_provision_submit = FormButton(\"Submit this provisioning request\")\nhost_provision_cancel = FormButton(\"Cancel this provisioning request\")\nretrieve = FormButton(\"LDAP Group Lookup\")\n\n\n_stored_pw_script = '//a[contains(@id, \"change_stored_password\")]'\n_stored_pw_angular = \"//a[@ng-hide='bChangeStoredPassword']\"\n\n\ndef change_stored_password():\n if version.current_version() > '5.5':\n if sel.is_displayed(_stored_pw_script):\n sel.execute_script(\n sel.get_attribute(\n sel.element(_stored_pw_script), 'onClick'))\n sel.wait_for_ajax() # To play safe\n elif sel.is_displayed(_stored_pw_angular):\n sel.click(_stored_pw_angular)\n else:\n logger.info(\"Probably no creds\")\n\n\[email protected]((FormButton, bool))\ndef _fill_fb_bool(fb, b):\n if b:\n sel.click(fb)\n", "path": "cfme/web_ui/form_buttons.py"}], "after_files": [{"content": "\"\"\"This module unifies working with CRUD form buttons.\n\nWhenever you use Add, Save, Cancel, Reset button, use this module.\nYou can use it also for the other buttons with same shape like those CRUD ones.\n\"\"\"\nfrom selenium.common.exceptions import NoSuchElementException\nfrom xml.sax.saxutils import quoteattr\n\nfrom widgetastic.xpath import quote\n\nfrom cfme.fixtures import pytest_selenium as sel\nfrom cfme.web_ui import fill\nfrom utils import version\nfrom utils.log import logger\nfrom utils.pretty import Pretty\n\n\nclass FormButton(Pretty):\n \"\"\"This class represents the buttons usually located in forms or CRUD.\n\n Args:\n alt: The text from ``alt`` field of the image.\n dimmed_alt: In case the ``alt`` param is different in the dimmed variant of the button.\n force_click: Click always, even if it is dimmed. 
(Causes an error if not visible)\n partial_alt: Whether the alt matching should be only partial (``in``).\n ng_click: To match the angular buttons, you can use this to specify the contents of\n ``ng-click`` attributeh.\n \"\"\"\n pretty_attrs = ['_alt', '_dimmed_alt', '_force', '_partial', '_ng_click']\n\n PRIMARY = 'btn-primary'\n\n class Button:\n \"\"\"Holds pieces of the XPath to be assembled.\"\"\"\n TAG_TYPES = \"//a | //button | //img | //input\"\n TYPE_CONDITION = (\n \"(contains(@class, 'button') or contains(@class, 'btn') or contains(@src, 'button'))\"\n )\n DIMMED = \"(contains(@class, 'dimmed') \" \\\n \"or contains(@class, 'disabled') \" \\\n \"or contains(@class, 'btn-disabled'))\"\n NOT_DIMMED = \"not{}\".format(DIMMED)\n IS_DISPLAYED = (\n \"not(ancestor::*[contains(@style, 'display:none') \"\n \"or contains(@style, 'display: none')])\")\n ON_CURRENT_TAB = (\n \"not(ancestor::div[contains(@class, 'tab-pane') and not(contains(@class, 'active'))])\")\n\n def __init__(\n self, alt, dimmed_alt=None, force_click=False, partial_alt=False, ng_click=None,\n classes=None):\n self._alt = alt\n self._dimmed_alt = dimmed_alt\n self._force = force_click\n self._partial = partial_alt\n self._ng_click = ng_click\n self._classes = classes or []\n\n def alt_expr(self, dimmed=False):\n if self._partial:\n if self._ng_click is None:\n return (\n \"(contains(normalize-space(@alt), {alt}) or \"\n \"contains(normalize-space(text()), {alt}))\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))\n else:\n return (\n \"(contains(normalize-space(@alt), {alt}) or \"\n \"@ng-click={click} or \"\n \"contains(normalize-space(text()), {alt}))\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),\n click=quoteattr(self._ng_click)))\n else:\n if self._ng_click is None:\n return (\n \"(normalize-space(@alt)={alt} or \"\n \"normalize-space(text())={alt})\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt)))\n else:\n return (\n \"(normalize-space(@alt)={alt} or \"\n \"@ng-click={click} or \"\n \"normalize-space(text())={alt})\".format(\n alt=quoteattr((self._dimmed_alt or self._alt) if dimmed else self._alt),\n click=quoteattr(self._ng_click)))\n\n def _format_generator(self, dimmed=False, include_dimmed_alt=False):\n \"\"\"Generates a dict that will be passed to the formatting strings.\"\"\"\n d = {}\n for key, value in self.Button.__dict__.iteritems():\n if not key.startswith(\"_\"):\n d[key] = value\n d[\"ALT_EXPR\"] = self.alt_expr(dimmed=dimmed)\n if include_dimmed_alt:\n d[\"DIMMED_ALT\"] = quoteattr(self._dimmed_alt or self._alt)\n if self._classes:\n d['CLASSES'] = 'and ({})'.format(\n ' and '.join('contains(@class, {})'.format(quote(kls)) for kls in self._classes))\n else:\n d['CLASSES'] = ''\n return d\n\n def locate(self):\n return (\n \"({TAG_TYPES})[{ALT_EXPR} and {NOT_DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n \"and {ON_CURRENT_TAB} {CLASSES}]\"\n .format(**self._format_generator(dimmed=False)))\n\n @property\n def is_dimmed(self):\n locator = (\n \"({TAG_TYPES})[{ALT_EXPR} and {DIMMED} and {TYPE_CONDITION} and {IS_DISPLAYED} \"\n \"and {ON_CURRENT_TAB} {CLASSES}]\"\n \"|\" # A bit different type of a button\n \"({TAG_TYPES})[normalize-space(.)={DIMMED_ALT} and {IS_DISPLAYED} and \"\n \"(@disabled='true' or contains(@class, 'btn-disabled')) and {ON_CURRENT_TAB}]\"\n .format(**self._format_generator(dimmed=True, include_dimmed_alt=True)))\n return sel.is_displayed(locator)\n\n @property\n def 
can_be_clicked(self):\n \"\"\"Whether the button is displayed, therefore clickable.\"\"\"\n try:\n return sel.is_displayed(self, move_to=True)\n except NoSuchElementException:\n return False\n\n def __call__(self, *args, **kwargs):\n \"\"\"For maintaining backward compatibility\"\"\"\n sel.click(self)\n\n def _custom_click_handler(self, wait_ajax):\n \"\"\"Handler called from pytest_selenium\"\"\"\n if self.is_dimmed and not self._force:\n logger.error(\"Could not click %s because it was dimmed\", repr(self))\n return\n sel.wait_for_element(self, timeout=5)\n return sel.click(self, no_custom_handler=True, wait_ajax=wait_ajax)\n\n def __str__(self):\n return self.locate()\n\n def __repr__(self):\n return \"{}({})\".format(type(self).__name__, str(repr(self._alt)))\n\n\nadd = FormButton(\"Add\")\nsave = FormButton(\"Save Changes\", dimmed_alt=\"Save\", ng_click=\"saveClicked()\")\nangular_save = FormButton(\"Save changes\", ng_click=\"saveClicked()\")\ncancel = FormButton(\"Cancel\")\ncancel_changes = FormButton(\"Cancel Changes\")\nsubmit = FormButton(\"Submit\")\nreset = FormButton(\"Reset Changes\", dimmed_alt=\"Reset\")\nvalidate = FormButton(\"Validate the credentials by logging into the Server\", dimmed_alt=\"Validate\")\nvalidate_short = FormButton(\"Validate the credentials\")\nvalidate_multi_host = FormButton(\"Validate the credentials by logging into the selected Host\")\nhost_provision_submit = FormButton(\"Submit this provisioning request\")\nhost_provision_cancel = FormButton(\"Cancel this provisioning request\")\nretrieve = FormButton(\"LDAP Group Lookup\")\n\n\n_stored_pw_script = '//a[contains(@id, \"change_stored_password\")]'\n_stored_pw_angular = \"//a[@ng-hide='bChangeStoredPassword']\"\n\n\ndef change_stored_password():\n if version.current_version() > '5.5':\n if sel.is_displayed(_stored_pw_script):\n sel.execute_script(\n sel.get_attribute(\n sel.element(_stored_pw_script), 'onClick'))\n sel.wait_for_ajax() # To play safe\n elif sel.is_displayed(_stored_pw_angular):\n sel.click(_stored_pw_angular)\n else:\n logger.info(\"Probably no creds\")\n\n\[email protected]((FormButton, bool))\ndef _fill_fb_bool(fb, b):\n if b:\n sel.click(fb)\n", "path": "cfme/web_ui/form_buttons.py"}]} | 2,451 | 681 |
gh_patches_debug_5880 | rasdani/github-patches | git_diff | quantumlib__Cirq-5247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add validation for Kraus operators
Many of the methods involving noise/channels mention that the matrices making up a mixture or channel must satisfy \sum_{k=0}^{r-1} A_k^\dagger A_k = I. This is never enforced, which allows the user to simulate unphysical systems and get invalid density matrices (see example below).
Validation is easy enough (`assert np.sum([m.T @ m for m in object._channel_()`) but I'm not sure where to put this @Strilanc ? Ideally this will be called exactly once, before a call to `simulate`.
Code Example:
```
from typing import Iterable
import cirq
import numpy as np
class UnnormalizedChannel(cirq.GeneralizedAmplitudeDampingChannel):
"""Overwrite the correct amplitude damping channel with a bad one."""
def _channel_(self) -> Iterable[np.ndarray]:
p0 = 1
sqrt_g = np.sqrt(self._gamma)
return (
p0 * np.array([[1., 0.], [0., 1.]]),
p0 * np.array([[0., sqrt_g], [0., 0.]]),
)
def unnormalized_channel(gamma):
return UnnormalizedChannel(1.0, gamma)
q0 = cirq.LineQubit(0)
bad_noise_inst = unnormalized_channel(0.3)
c = cirq.Circuit.from_ops(cirq.X(q0), bad_noise_inst.on(q0))
dm = cirq.DensityMatrixSimulator().simulate(c).final_density_matrix
kraus_sum = np.sum(m.T @ m for m in bad_noise_inst._channel_())
print(np.allclose(kraus_sum, np.eye(2)))
print(np.isclose(np.trace(dm), 1.0))
```
which prints
```
>>> False
>>> False
```
--- END ISSUE ---
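For reference, a minimal, self-contained sketch of the trace-preservation check the issue is asking for; note the conjugate transpose, which the one-liner quoted above omits. The helper name, tolerance and standalone placement are illustrative assumptions, not Cirq API:
```
import numpy as np

def assert_trace_preserving(kraus_ops, atol=1e-8):
    """Check that sum_k A_k^dagger A_k equals the identity."""
    ops = [np.asarray(a, dtype=complex) for a in kraus_ops]
    dim = ops[0].shape[1]
    total = sum(a.conj().T @ a for a in ops)
    if not np.allclose(total, np.eye(dim), atol=atol):
        raise ValueError("Kraus operators are not trace preserving:\n{}".format(total))

gamma = 0.3
damped = [np.array([[1., 0.], [0., np.sqrt(1 - gamma)]]),  # proper amplitude damping
          np.array([[0., np.sqrt(gamma)], [0., 0.]])]
broken = [np.eye(2),                                       # the channel from the issue
          np.array([[0., np.sqrt(gamma)], [0., 0.]])]

assert_trace_preserving(damped)      # passes: the terms sum to the identity
try:
    assert_trace_preserving(broken)  # sums to diag(1, 1 + gamma) instead of I
except ValueError as err:
    print(err)
```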
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/projector.py`
Content:
```
1 # pylint: disable=wrong-or-nonexistent-copyright-notice
2 import itertools
3 from typing import Any, Dict, Iterable, List, Mapping, Optional, Union
4
5 import numpy as np
6 from scipy.sparse import csr_matrix
7
8 from cirq import value
9 from cirq.ops import raw_types
10
11
12 def _check_qids_dimension(qids):
13 """A utility to check that we only have Qubits."""
14 for qid in qids:
15 if qid.dimension != 2:
16 raise ValueError(f"Only qubits are supported, but {qid} has dimension {qid.dimension}")
17
18
19 @value.value_equality(approximate=True)
20 class ProjectorString:
21 def __init__(
22 self, projector_dict: Dict[raw_types.Qid, int], coefficient: Union[int, float, complex] = 1
23 ):
24 """Contructor for ProjectorString
25
26 Args:
27 projector_dict: A python dictionary mapping from cirq.Qid to integers. A key value pair
28 represents the desired computational basis state for that qubit.
29 coefficient: Initial scalar coefficient. Defaults to 1.
30 """
31 _check_qids_dimension(projector_dict.keys())
32 self._projector_dict = projector_dict
33 self._coefficient = complex(coefficient)
34
35 @property
36 def projector_dict(self) -> Dict[raw_types.Qid, int]:
37 return self._projector_dict
38
39 @property
40 def coefficient(self) -> complex:
41 return self._coefficient
42
43 def matrix(self, projector_qids: Optional[Iterable[raw_types.Qid]] = None) -> csr_matrix:
44 """Returns the matrix of self in computational basis of qubits.
45
46 Args:
47 projector_qids: Ordered collection of qubits that determine the subspace
48 in which the matrix representation of the ProjectorString is to
49 be computed. Qbits absent from self.qubits are acted on by
50 the identity. Defaults to the qubits of the projector_dict.
51
52 Returns:
53 A sparse matrix that is the projection in the specified basis.
54 """
55 projector_qids = self._projector_dict.keys() if projector_qids is None else projector_qids
56 _check_qids_dimension(projector_qids)
57 idx_to_keep = [
58 [self._projector_dict[qid]] if qid in self._projector_dict else [0, 1]
59 for qid in projector_qids
60 ]
61
62 total_d = np.prod([qid.dimension for qid in projector_qids], dtype=np.int64)
63
64 ones_idx = []
65 for idx in itertools.product(*idx_to_keep):
66 d = total_d
67 kron_idx = 0
68 for i, qid in zip(idx, projector_qids):
69 d //= qid.dimension
70 kron_idx += i * d
71 ones_idx.append(kron_idx)
72
73 return csr_matrix(
74 ([self._coefficient] * len(ones_idx), (ones_idx, ones_idx)), shape=(total_d, total_d)
75 )
76
77 def _get_idx_to_keep(self, qid_map: Mapping[raw_types.Qid, int]):
78 num_qubits = len(qid_map)
79 idx_to_keep: List[Any] = [slice(0, 2)] * num_qubits
80 for q in self.projector_dict.keys():
81 idx_to_keep[qid_map[q]] = self.projector_dict[q]
82 return tuple(idx_to_keep)
83
84 def expectation_from_state_vector(
85 self, state_vector: np.ndarray, qid_map: Mapping[raw_types.Qid, int]
86 ) -> complex:
87 """Expectation of the projection from a state vector.
88
89 Computes the expectation value of this ProjectorString on the provided state vector.
90
91 Args:
92 state_vector: An array representing a valid state vector.
93 qid_map: A map from all qubits used in this ProjectorString to the
94 indices of the qubits that `state_vector` is defined over.
95
96 Returns:
97 The expectation value of the input state.
98 """
99 _check_qids_dimension(qid_map.keys())
100 num_qubits = len(qid_map)
101 index = self._get_idx_to_keep(qid_map)
102 return self._coefficient * np.sum(
103 np.abs(np.reshape(state_vector, (2,) * num_qubits)[index]) ** 2
104 )
105
106 def expectation_from_density_matrix(
107 self, state: np.ndarray, qid_map: Mapping[raw_types.Qid, int]
108 ) -> complex:
109 """Expectation of the projection from a density matrix.
110
111 Computes the expectation value of this ProjectorString on the provided state.
112
113 Args:
114 state: An array representing a valid density matrix.
115 qid_map: A map from all qubits used in this ProjectorString to the
116 indices of the qubits that `state_vector` is defined over.
117
118 Returns:
119 The expectation value of the input state.
120 """
121 _check_qids_dimension(qid_map.keys())
122 num_qubits = len(qid_map)
123 index = self._get_idx_to_keep(qid_map) * 2
124 result = np.reshape(state, (2,) * (2 * num_qubits))[index]
125 while any(result.shape):
126 result = np.trace(result, axis1=0, axis2=len(result.shape) // 2)
127 return self._coefficient * result
128
129 def __repr__(self) -> str:
130 return (
131 f"cirq.ProjectorString(projector_dict={self._projector_dict},"
132 + f"coefficient={self._coefficient})"
133 )
134
135 def _json_dict_(self) -> Dict[str, Any]:
136 return {
137 'projector_dict': list(self._projector_dict.items()),
138 'coefficient': self._coefficient,
139 }
140
141 @classmethod
142 def _from_json_dict_(cls, projector_dict, coefficient, **kwargs):
143 return cls(projector_dict=dict(projector_dict), coefficient=coefficient)
144
145 def _value_equality_values_(self) -> Any:
146 projector_dict = sorted(self._projector_dict.items())
147 return (tuple(projector_dict), self._coefficient)
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ops/projector.py b/cirq-core/cirq/ops/projector.py
--- a/cirq-core/cirq/ops/projector.py
+++ b/cirq-core/cirq/ops/projector.py
@@ -21,7 +21,7 @@
def __init__(
self, projector_dict: Dict[raw_types.Qid, int], coefficient: Union[int, float, complex] = 1
):
- """Contructor for ProjectorString
+ """Constructor for ProjectorString
Args:
projector_dict: A python dictionary mapping from cirq.Qid to integers. A key value pair
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/projector.py b/cirq-core/cirq/ops/projector.py\n--- a/cirq-core/cirq/ops/projector.py\n+++ b/cirq-core/cirq/ops/projector.py\n@@ -21,7 +21,7 @@\n def __init__(\n self, projector_dict: Dict[raw_types.Qid, int], coefficient: Union[int, float, complex] = 1\n ):\n- \"\"\"Contructor for ProjectorString\n+ \"\"\"Constructor for ProjectorString\n \n Args:\n projector_dict: A python dictionary mapping from cirq.Qid to integers. A key value pair\n", "issue": "Add validation for Kraus operators \nMany of the methods involving noise/channels mention that the matrices making up a mixture or channel must satisfy \\sum_{k=0}^{r-1} A_i^\\dagger A_i = I. This is never enforced, which allows the user to simulate unphysical systems and get invalid density matrices (see example below).\r\n\r\nValidation is easy enough (`assert np.sum([m.T @ m for m in object._channel_()`) but I'm not sure where to put this @Strilanc ? Ideally this will be called exactly once, before a call to `simulate`. \r\n\r\n\r\nCode Example:\r\n```\r\nfrom typing import Iterable\r\n\r\nimport cirq\r\nimport numpy as np\r\n\r\nclass UnnormalizedChannel(cirq.GeneralizedAmplitudeDampingChannel):\r\n \"\"\"Overwrite the correct amplitude damping channel with a bad one.\"\"\"\r\n def _channel_(self) -> Iterable[np.ndarray]:\r\n p0 = 1\r\n sqrt_g = np.sqrt(self._gamma)\r\n return (\r\n p0 * np.array([[1., 0.], [0., 1.]]),\r\n p0 * np.array([[0., sqrt_g], [0., 0.]]),\r\n )\r\n\r\n\r\ndef unnormalized_channel(gamma):\r\n return UnnormalizedChannel(1.0, gamma)\r\n\r\n\r\nq0 = cirq.LineQubit(0)\r\nbad_noise_inst = unnormalized_channel(0.3)\r\nc = cirq.Circuit.from_ops(cirq.X(q0), bad_noise_inst.on(q0))\r\ndm = cirq.DensityMatrixSimulator().simulate(c).final_density_matrix\r\nkraus_sum = np.sum(m.T @ m for m in bad_noise_inst._channel_())\r\nprint(np.allclose(kraus_sum, np.eye(2)))\r\nprint(np.isclose(np.trace(dm), 1.0))\r\n```\r\nwhich prints\r\n```\r\n>>> False\r\n>>> False\r\n```\n", "before_files": [{"content": "# pylint: disable=wrong-or-nonexistent-copyright-notice\nimport itertools\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Union\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom cirq import value\nfrom cirq.ops import raw_types\n\n\ndef _check_qids_dimension(qids):\n \"\"\"A utility to check that we only have Qubits.\"\"\"\n for qid in qids:\n if qid.dimension != 2:\n raise ValueError(f\"Only qubits are supported, but {qid} has dimension {qid.dimension}\")\n\n\[email protected]_equality(approximate=True)\nclass ProjectorString:\n def __init__(\n self, projector_dict: Dict[raw_types.Qid, int], coefficient: Union[int, float, complex] = 1\n ):\n \"\"\"Contructor for ProjectorString\n\n Args:\n projector_dict: A python dictionary mapping from cirq.Qid to integers. A key value pair\n represents the desired computational basis state for that qubit.\n coefficient: Initial scalar coefficient. 
Defaults to 1.\n \"\"\"\n _check_qids_dimension(projector_dict.keys())\n self._projector_dict = projector_dict\n self._coefficient = complex(coefficient)\n\n @property\n def projector_dict(self) -> Dict[raw_types.Qid, int]:\n return self._projector_dict\n\n @property\n def coefficient(self) -> complex:\n return self._coefficient\n\n def matrix(self, projector_qids: Optional[Iterable[raw_types.Qid]] = None) -> csr_matrix:\n \"\"\"Returns the matrix of self in computational basis of qubits.\n\n Args:\n projector_qids: Ordered collection of qubits that determine the subspace\n in which the matrix representation of the ProjectorString is to\n be computed. Qbits absent from self.qubits are acted on by\n the identity. Defaults to the qubits of the projector_dict.\n\n Returns:\n A sparse matrix that is the projection in the specified basis.\n \"\"\"\n projector_qids = self._projector_dict.keys() if projector_qids is None else projector_qids\n _check_qids_dimension(projector_qids)\n idx_to_keep = [\n [self._projector_dict[qid]] if qid in self._projector_dict else [0, 1]\n for qid in projector_qids\n ]\n\n total_d = np.prod([qid.dimension for qid in projector_qids], dtype=np.int64)\n\n ones_idx = []\n for idx in itertools.product(*idx_to_keep):\n d = total_d\n kron_idx = 0\n for i, qid in zip(idx, projector_qids):\n d //= qid.dimension\n kron_idx += i * d\n ones_idx.append(kron_idx)\n\n return csr_matrix(\n ([self._coefficient] * len(ones_idx), (ones_idx, ones_idx)), shape=(total_d, total_d)\n )\n\n def _get_idx_to_keep(self, qid_map: Mapping[raw_types.Qid, int]):\n num_qubits = len(qid_map)\n idx_to_keep: List[Any] = [slice(0, 2)] * num_qubits\n for q in self.projector_dict.keys():\n idx_to_keep[qid_map[q]] = self.projector_dict[q]\n return tuple(idx_to_keep)\n\n def expectation_from_state_vector(\n self, state_vector: np.ndarray, qid_map: Mapping[raw_types.Qid, int]\n ) -> complex:\n \"\"\"Expectation of the projection from a state vector.\n\n Computes the expectation value of this ProjectorString on the provided state vector.\n\n Args:\n state_vector: An array representing a valid state vector.\n qid_map: A map from all qubits used in this ProjectorString to the\n indices of the qubits that `state_vector` is defined over.\n\n Returns:\n The expectation value of the input state.\n \"\"\"\n _check_qids_dimension(qid_map.keys())\n num_qubits = len(qid_map)\n index = self._get_idx_to_keep(qid_map)\n return self._coefficient * np.sum(\n np.abs(np.reshape(state_vector, (2,) * num_qubits)[index]) ** 2\n )\n\n def expectation_from_density_matrix(\n self, state: np.ndarray, qid_map: Mapping[raw_types.Qid, int]\n ) -> complex:\n \"\"\"Expectation of the projection from a density matrix.\n\n Computes the expectation value of this ProjectorString on the provided state.\n\n Args:\n state: An array representing a valid density matrix.\n qid_map: A map from all qubits used in this ProjectorString to the\n indices of the qubits that `state_vector` is defined over.\n\n Returns:\n The expectation value of the input state.\n \"\"\"\n _check_qids_dimension(qid_map.keys())\n num_qubits = len(qid_map)\n index = self._get_idx_to_keep(qid_map) * 2\n result = np.reshape(state, (2,) * (2 * num_qubits))[index]\n while any(result.shape):\n result = np.trace(result, axis1=0, axis2=len(result.shape) // 2)\n return self._coefficient * result\n\n def __repr__(self) -> str:\n return (\n f\"cirq.ProjectorString(projector_dict={self._projector_dict},\"\n + f\"coefficient={self._coefficient})\"\n )\n\n def _json_dict_(self) -> 
Dict[str, Any]:\n return {\n 'projector_dict': list(self._projector_dict.items()),\n 'coefficient': self._coefficient,\n }\n\n @classmethod\n def _from_json_dict_(cls, projector_dict, coefficient, **kwargs):\n return cls(projector_dict=dict(projector_dict), coefficient=coefficient)\n\n def _value_equality_values_(self) -> Any:\n projector_dict = sorted(self._projector_dict.items())\n return (tuple(projector_dict), self._coefficient)\n", "path": "cirq-core/cirq/ops/projector.py"}], "after_files": [{"content": "# pylint: disable=wrong-or-nonexistent-copyright-notice\nimport itertools\nfrom typing import Any, Dict, Iterable, List, Mapping, Optional, Union\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nfrom cirq import value\nfrom cirq.ops import raw_types\n\n\ndef _check_qids_dimension(qids):\n \"\"\"A utility to check that we only have Qubits.\"\"\"\n for qid in qids:\n if qid.dimension != 2:\n raise ValueError(f\"Only qubits are supported, but {qid} has dimension {qid.dimension}\")\n\n\[email protected]_equality(approximate=True)\nclass ProjectorString:\n def __init__(\n self, projector_dict: Dict[raw_types.Qid, int], coefficient: Union[int, float, complex] = 1\n ):\n \"\"\"Constructor for ProjectorString\n\n Args:\n projector_dict: A python dictionary mapping from cirq.Qid to integers. A key value pair\n represents the desired computational basis state for that qubit.\n coefficient: Initial scalar coefficient. Defaults to 1.\n \"\"\"\n _check_qids_dimension(projector_dict.keys())\n self._projector_dict = projector_dict\n self._coefficient = complex(coefficient)\n\n @property\n def projector_dict(self) -> Dict[raw_types.Qid, int]:\n return self._projector_dict\n\n @property\n def coefficient(self) -> complex:\n return self._coefficient\n\n def matrix(self, projector_qids: Optional[Iterable[raw_types.Qid]] = None) -> csr_matrix:\n \"\"\"Returns the matrix of self in computational basis of qubits.\n\n Args:\n projector_qids: Ordered collection of qubits that determine the subspace\n in which the matrix representation of the ProjectorString is to\n be computed. Qbits absent from self.qubits are acted on by\n the identity. 
Defaults to the qubits of the projector_dict.\n\n Returns:\n A sparse matrix that is the projection in the specified basis.\n \"\"\"\n projector_qids = self._projector_dict.keys() if projector_qids is None else projector_qids\n _check_qids_dimension(projector_qids)\n idx_to_keep = [\n [self._projector_dict[qid]] if qid in self._projector_dict else [0, 1]\n for qid in projector_qids\n ]\n\n total_d = np.prod([qid.dimension for qid in projector_qids], dtype=np.int64)\n\n ones_idx = []\n for idx in itertools.product(*idx_to_keep):\n d = total_d\n kron_idx = 0\n for i, qid in zip(idx, projector_qids):\n d //= qid.dimension\n kron_idx += i * d\n ones_idx.append(kron_idx)\n\n return csr_matrix(\n ([self._coefficient] * len(ones_idx), (ones_idx, ones_idx)), shape=(total_d, total_d)\n )\n\n def _get_idx_to_keep(self, qid_map: Mapping[raw_types.Qid, int]):\n num_qubits = len(qid_map)\n idx_to_keep: List[Any] = [slice(0, 2)] * num_qubits\n for q in self.projector_dict.keys():\n idx_to_keep[qid_map[q]] = self.projector_dict[q]\n return tuple(idx_to_keep)\n\n def expectation_from_state_vector(\n self, state_vector: np.ndarray, qid_map: Mapping[raw_types.Qid, int]\n ) -> complex:\n \"\"\"Expectation of the projection from a state vector.\n\n Computes the expectation value of this ProjectorString on the provided state vector.\n\n Args:\n state_vector: An array representing a valid state vector.\n qid_map: A map from all qubits used in this ProjectorString to the\n indices of the qubits that `state_vector` is defined over.\n\n Returns:\n The expectation value of the input state.\n \"\"\"\n _check_qids_dimension(qid_map.keys())\n num_qubits = len(qid_map)\n index = self._get_idx_to_keep(qid_map)\n return self._coefficient * np.sum(\n np.abs(np.reshape(state_vector, (2,) * num_qubits)[index]) ** 2\n )\n\n def expectation_from_density_matrix(\n self, state: np.ndarray, qid_map: Mapping[raw_types.Qid, int]\n ) -> complex:\n \"\"\"Expectation of the projection from a density matrix.\n\n Computes the expectation value of this ProjectorString on the provided state.\n\n Args:\n state: An array representing a valid density matrix.\n qid_map: A map from all qubits used in this ProjectorString to the\n indices of the qubits that `state_vector` is defined over.\n\n Returns:\n The expectation value of the input state.\n \"\"\"\n _check_qids_dimension(qid_map.keys())\n num_qubits = len(qid_map)\n index = self._get_idx_to_keep(qid_map) * 2\n result = np.reshape(state, (2,) * (2 * num_qubits))[index]\n while any(result.shape):\n result = np.trace(result, axis1=0, axis2=len(result.shape) // 2)\n return self._coefficient * result\n\n def __repr__(self) -> str:\n return (\n f\"cirq.ProjectorString(projector_dict={self._projector_dict},\"\n + f\"coefficient={self._coefficient})\"\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'projector_dict': list(self._projector_dict.items()),\n 'coefficient': self._coefficient,\n }\n\n @classmethod\n def _from_json_dict_(cls, projector_dict, coefficient, **kwargs):\n return cls(projector_dict=dict(projector_dict), coefficient=coefficient)\n\n def _value_equality_values_(self) -> Any:\n projector_dict = sorted(self._projector_dict.items())\n return (tuple(projector_dict), self._coefficient)\n", "path": "cirq-core/cirq/ops/projector.py"}]} | 2,313 | 142 |
gh_patches_debug_27147 | rasdani/github-patches | git_diff | scverse__scanpy-133 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tl.score_genes does not work with one gene given for scoring
Hi,
I have found that using `sc.api.tl.score_genes()` gives the following error if I input a single gene as gene list:
```
computing score 'score'
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-27-526dfa387800> in <module>()
----> 1 sc.tl.score_genes(adata=adata,gene_list= genes)
~/Documents/Python/scanpy/scanpy/tools/score_genes.py in score_genes(adata, gene_list, ctrl_size, gene_pool, n_bins, score_name, random_state, copy)
96 gene_list = list(gene_list)
97
---> 98 score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)
99 adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
100
~/miniconda3/lib/python3.6/site-packages/numpy/core/fromnumeric.py in mean(a, axis, dtype, out, keepdims)
2904 pass
2905 else:
-> 2906 return mean(axis=axis, dtype=dtype, out=out, **kwargs)
2907
2908 return _methods._mean(a, axis=axis, dtype=dtype,
~/miniconda3/lib/python3.6/site-packages/numpy/core/_methods.py in _mean(a, axis, dtype, out, keepdims)
55
56 is_float16_result = False
---> 57 rcount = _count_reduce_items(arr, axis)
58 # Make this warning show up first
59 if rcount == 0:
~/miniconda3/lib/python3.6/site-packages/numpy/core/_methods.py in _count_reduce_items(arr, axis)
48 items = 1
49 for ax in axis:
---> 50 items *= arr.shape[ax]
51 return items
52
IndexError: tuple index out of range
```
I suggest that you include a check for the length of the input `gene_list`.
--- END ISSUE ---
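For reference, a small self-contained sketch (plain NumPy, made-up data) of the shape problem behind this traceback: slicing down to a single gene can hand back a 1-D array, which has no `axis=1`, so the mean has to branch on dimensionality:
```
import numpy as np

X = np.random.rand(5, 3)          # 5 cells x 3 genes
two_genes = X[:, [0, 1]]          # 2-D slice
one_gene = X[:, 0]                # 1-D slice, roughly what the AnnData view returned here

print(np.mean(two_genes, axis=1).shape)   # (5,) -- fine for two or more genes
try:
    np.mean(one_gene, axis=1)             # a 1-D array has no axis 1
except Exception as err:                  # IndexError / AxisError depending on the NumPy version
    print(type(err).__name__, err)

# branching on dimensionality keeps a per-cell score in both cases
score = one_gene if one_gene.ndim == 1 else np.mean(one_gene, axis=1)
print(score.shape)                        # (5,)
```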
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/tools/score_genes.py`
Content:
```
1 """Calculate scores based on the expression of gene lists.
2 """
3
4 import numpy as np
5 import pandas as pd
6 import scipy.sparse
7 from .. import settings
8 from .. import logging as logg
9
10
11 def score_genes(
12 adata,
13 gene_list,
14 ctrl_size=50,
15 gene_pool=None,
16 n_bins=25,
17 score_name='score',
18 random_state=0,
19 copy=False): # we use the scikit-learn convention of calling the seed "random_state"
20 """Score a set of genes [Satija15]_.
21
22 The score is the average expression of a set of genes subtracted with the
23 average expression of a reference set of genes. The reference set is
24 randomly sampled from the `gene_pool` for each binned expression value.
25
26 This reproduces the approach in Seurat [Satija15]_ and has been implemented
27 for Scanpy by Davide Cittaro.
28
29 Parameters
30 ----------
31 adata : :class:`~scanpy.api.AnnData`
32 The annotated data matrix.
33 gene_list : iterable
34 The list of gene names used for score calculation.
35 ctrl_size : `int`, optional (default: 50)
36 Number of reference genes to be sampled. If `len(gene_list)` is not too
37 low, you can set `ctrl_size=len(gene_list)`.
38 gene_pool : `list` or `None`, optional (default: `None`)
39 Genes for sampling the reference set. Default is all genes.
40 n_bins : `int`, optional (default: 25)
41 Number of expression level bins for sampling.
42 score_name : `str`, optional (default: `'score'`)
43 Name of the field to be added in `.obs`.
44 random_state : `int`, optional (default: 0)
45 The random seed for sampling.
46 copy : `bool`, optional (default: `False`)
47 Copy `adata` or modify it inplace.
48
49 Returns
50 -------
51 Depending on `copy`, returns or updates `adata` with an additional field
52 `score_name`.
53
54 Examples
55 --------
56 See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.
57 """
58 logg.info('computing score \'{}\''.format(score_name), r=True)
59 adata = adata.copy() if copy else adata
60
61 if random_state:
62 np.random.seed(random_state)
63
64 gene_list = set([x for x in gene_list if x in adata.var_names])
65
66 if not gene_pool:
67 gene_pool = list(adata.var_names)
68 else:
69 gene_pool = [x for x in gene_pool if x in adata.var_names]
70
71 # Trying here to match the Seurat approach in scoring cells.
72 # Basically we need to compare genes against random genes in a matched
73 # interval of expression.
74
75 # TODO: this densifies the whole data matrix for `gene_pool`
76 if scipy.sparse.issparse(adata.X):
77 obs_avg = pd.Series(
78 np.nanmean(
79 adata[:, gene_pool].X.toarray(), axis=0), index=gene_pool) # average expression of genes
80 else:
81 obs_avg = pd.Series(
82 np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes
83
84 n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
85 obs_cut = obs_avg.rank(method='min') // n_items
86 control_genes = set()
87
88 # now pick `ctrl_size` genes from every cut
89 for cut in np.unique(obs_cut.loc[gene_list]):
90 r_genes = np.array(obs_cut[obs_cut == cut].index)
91 np.random.shuffle(r_genes)
92 control_genes.update(set(r_genes[:ctrl_size])) # uses full r_genes if ctrl_size > len(r_genes)
93
94 # To index, we need a list - indexing implies an order.
95 control_genes = list(control_genes - gene_list)
96 gene_list = list(gene_list)
97
98 score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)
99 adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
100
101 logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
102 logg.hint('added\n'
103 ' \'{}\', score of gene set (adata.obs)'.format(score_name))
104 return adata if copy else None
105
106
107 def score_genes_cell_cycle(
108 adata,
109 s_genes,
110 g2m_genes,
111 copy=False,
112 **kwargs):
113 """Score cell cycle genes [Satija15]_.
114
115 Given two lists of genes associated to S phase and G2M phase, calculates
116 scores and assigns a cell cycle phase (G1, S or G2M). See
117 :func:`~scanpy.api.score_genes` for more explanation.
118
119 Parameters
120 ----------
121 adata : :class:`~scanpy.api.AnnData`
122 The annotated data matrix.
123 s_genes : `list`
124 List of genes associated with S phase.
125 g2m_genes : `list`
126 List of genes associated with G2M phase.
127 copy : `bool`, optional (default: `False`)
128 Copy `adata` or modify it inplace.
129 **kwargs : optional keyword arguments
130 Are passed to :func:`~scanpy.api.score_genes`. `ctrl_size` is not
131 possible, as it's set as `min(len(s_genes), len(g2m_genes))`.
132
133 Returns
134 -------
135 Depending on `copy`, returns or updates `adata` with the following fields.
136
137 S_score : `adata.obs`, dtype `object`
138 The score for S phase for each cell.
139 G2M_score : `adata.obs`, dtype `object`
140 The score for G2M phase for each cell.
141 phase : `adata.obs`, dtype `object`
142 The cell cycle phase (`S`, `G2M` or `G1`) for each cell.
143
144 See also
145 --------
146 score_genes
147
148 Examples
149 --------
150 See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.
151 """
152 logg.info('calculating cell cycle phase')
153
154 adata = adata.copy() if copy else adata
155 ctrl_size = min(len(s_genes), len(g2m_genes))
156 # add s-score
157 score_genes(adata, gene_list=s_genes, score_name='S_score', ctrl_size=ctrl_size, **kwargs)
158 # add g2m-score
159 score_genes(adata, gene_list=g2m_genes, score_name='G2M_score', ctrl_size=ctrl_size, **kwargs)
160 scores = adata.obs[['S_score', 'G2M_score']]
161
162 # default phase is S
163 phase = pd.Series('S', index=scores.index)
164
165 # if G2M is higher than S, it's G2M
166 phase[scores.G2M_score > scores.S_score] = 'G2M'
167
168 # if all scores are negative, it's G1...
169 phase[np.all(scores < 0, axis=1)] = 'G1'
170
171 adata.obs['phase'] = phase
172 logg.hint(' \'phase\', cell cycle phase (adata.obs)')
173 return adata if copy else None
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/tools/score_genes.py b/scanpy/tools/score_genes.py
--- a/scanpy/tools/score_genes.py
+++ b/scanpy/tools/score_genes.py
@@ -81,6 +81,9 @@
obs_avg = pd.Series(
np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes
+ obs_avg = obs_avg[np.isfinite(obs_avg)] # Sometimes (and I don't know how) missing data may be there, with nansfor
+ gene_pool = list(obs_avg.index)
+
n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
obs_cut = obs_avg.rank(method='min') // n_items
control_genes = set()
@@ -95,7 +98,16 @@
control_genes = list(control_genes - gene_list)
gene_list = list(gene_list)
- score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)
+ if len(gene_list) == 0:
+ # We shouldn't even get here, but just in case
+ logg.hint('could not add \n'
+ ' \'{}\', score of gene set (adata.obs)'.format(score_name))
+ return adata if copy else None
+ elif len(gene_list) == 1:
+ score = np.nanmean(adata[:, gene_list].X) - np.nanmean(adata[:, control_genes].X)
+ else:
+ score = np.nanmean(adata[:, gene_list].X, axis=1) - np.nanmean(adata[:, control_genes].X, axis=1)
+
adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
| {"golden_diff": "diff --git a/scanpy/tools/score_genes.py b/scanpy/tools/score_genes.py\n--- a/scanpy/tools/score_genes.py\n+++ b/scanpy/tools/score_genes.py\n@@ -81,6 +81,9 @@\n obs_avg = pd.Series(\n np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes\n \n+ obs_avg = obs_avg[np.isfinite(obs_avg)] # Sometimes (and I don't know how) missing data may be there, with nansfor\n+ gene_pool = list(obs_avg.index)\n+\n n_items = int(np.round(len(obs_avg) / (n_bins - 1)))\n obs_cut = obs_avg.rank(method='min') // n_items\n control_genes = set()\n@@ -95,7 +98,16 @@\n control_genes = list(control_genes - gene_list)\n gene_list = list(gene_list)\n \n- score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)\n+ if len(gene_list) == 0:\n+ # We shouldn't even get here, but just in case\n+ logg.hint('could not add \\n'\n+ ' \\'{}\\', score of gene set (adata.obs)'.format(score_name))\n+ return adata if copy else None\n+ elif len(gene_list) == 1:\n+ score = np.nanmean(adata[:, gene_list].X) - np.nanmean(adata[:, control_genes].X)\n+ else:\n+ score = np.nanmean(adata[:, gene_list].X, axis=1) - np.nanmean(adata[:, control_genes].X, axis=1)\n+\n adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)\n \n logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\\n')\n", "issue": "tl.score_genes does not work with one gene given for scoring\nHi,\r\n\r\nI have found that using `sc.api.tl.score_genes()` gives the following error if I input a single gene as gene list:\r\n```\r\ncomputing score 'score'\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n<ipython-input-27-526dfa387800> in <module>()\r\n----> 1 sc.tl.score_genes(adata=adata,gene_list= genes)\r\n\r\n~/Documents/Python/scanpy/scanpy/tools/score_genes.py in score_genes(adata, gene_list, ctrl_size, gene_pool, n_bins, score_name, random_state, copy)\r\n 96 gene_list = list(gene_list)\r\n 97 \r\n---> 98 score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)\r\n 99 adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)\r\n 100 \r\n\r\n~/miniconda3/lib/python3.6/site-packages/numpy/core/fromnumeric.py in mean(a, axis, dtype, out, keepdims)\r\n 2904 pass\r\n 2905 else:\r\n-> 2906 return mean(axis=axis, dtype=dtype, out=out, **kwargs)\r\n 2907 \r\n 2908 return _methods._mean(a, axis=axis, dtype=dtype,\r\n\r\n~/miniconda3/lib/python3.6/site-packages/numpy/core/_methods.py in _mean(a, axis, dtype, out, keepdims)\r\n 55 \r\n 56 is_float16_result = False\r\n---> 57 rcount = _count_reduce_items(arr, axis)\r\n 58 # Make this warning show up first\r\n 59 if rcount == 0:\r\n\r\n~/miniconda3/lib/python3.6/site-packages/numpy/core/_methods.py in _count_reduce_items(arr, axis)\r\n 48 items = 1\r\n 49 for ax in axis:\r\n---> 50 items *= arr.shape[ax]\r\n 51 return items\r\n 52 \r\n\r\nIndexError: tuple index out of range\r\n```\r\nI suggest that you include a check for the length of the input `gene_list`. \n", "before_files": [{"content": "\"\"\"Calculate scores based on the expression of gene lists.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse\nfrom .. import settings\nfrom .. 
import logging as logg\n\n\ndef score_genes(\n adata,\n gene_list,\n ctrl_size=50,\n gene_pool=None,\n n_bins=25,\n score_name='score',\n random_state=0,\n copy=False): # we use the scikit-learn convention of calling the seed \"random_state\"\n \"\"\"Score a set of genes [Satija15]_.\n\n The score is the average expression of a set of genes subtracted with the\n average expression of a reference set of genes. The reference set is\n randomly sampled from the `gene_pool` for each binned expression value.\n\n This reproduces the approach in Seurat [Satija15]_ and has been implemented\n for Scanpy by Davide Cittaro.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n The annotated data matrix.\n gene_list : iterable\n The list of gene names used for score calculation.\n ctrl_size : `int`, optional (default: 50)\n Number of reference genes to be sampled. If `len(gene_list)` is not too\n low, you can set `ctrl_size=len(gene_list)`.\n gene_pool : `list` or `None`, optional (default: `None`)\n Genes for sampling the reference set. Default is all genes.\n n_bins : `int`, optional (default: 25)\n Number of expression level bins for sampling.\n score_name : `str`, optional (default: `'score'`)\n Name of the field to be added in `.obs`.\n random_state : `int`, optional (default: 0)\n The random seed for sampling.\n copy : `bool`, optional (default: `False`)\n Copy `adata` or modify it inplace.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with an additional field\n `score_name`.\n\n Examples\n --------\n See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.\n \"\"\"\n logg.info('computing score \\'{}\\''.format(score_name), r=True)\n adata = adata.copy() if copy else adata\n\n if random_state:\n np.random.seed(random_state)\n\n gene_list = set([x for x in gene_list if x in adata.var_names])\n\n if not gene_pool:\n gene_pool = list(adata.var_names)\n else:\n gene_pool = [x for x in gene_pool if x in adata.var_names]\n\n # Trying here to match the Seurat approach in scoring cells.\n # Basically we need to compare genes against random genes in a matched\n # interval of expression.\n\n # TODO: this densifies the whole data matrix for `gene_pool`\n if scipy.sparse.issparse(adata.X):\n obs_avg = pd.Series(\n np.nanmean(\n adata[:, gene_pool].X.toarray(), axis=0), index=gene_pool) # average expression of genes\n else:\n obs_avg = pd.Series(\n np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes\n\n n_items = int(np.round(len(obs_avg) / (n_bins - 1)))\n obs_cut = obs_avg.rank(method='min') // n_items\n control_genes = set()\n\n # now pick `ctrl_size` genes from every cut\n for cut in np.unique(obs_cut.loc[gene_list]):\n r_genes = np.array(obs_cut[obs_cut == cut].index)\n np.random.shuffle(r_genes)\n control_genes.update(set(r_genes[:ctrl_size])) # uses full r_genes if ctrl_size > len(r_genes)\n\n # To index, we need a list - indexing implies an order.\n control_genes = list(control_genes - gene_list)\n gene_list = list(gene_list)\n\n score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)\n adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)\n\n logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\\n')\n logg.hint('added\\n'\n ' \\'{}\\', score of gene set (adata.obs)'.format(score_name))\n return adata if copy else None\n\n\ndef score_genes_cell_cycle(\n adata,\n s_genes,\n g2m_genes,\n copy=False,\n 
**kwargs):\n \"\"\"Score cell cycle genes [Satija15]_.\n\n Given two lists of genes associated to S phase and G2M phase, calculates\n scores and assigns a cell cycle phase (G1, S or G2M). See\n :func:`~scanpy.api.score_genes` for more explanation.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n The annotated data matrix.\n s_genes : `list`\n List of genes associated with S phase.\n g2m_genes : `list`\n List of genes associated with G2M phase.\n copy : `bool`, optional (default: `False`)\n Copy `adata` or modify it inplace.\n **kwargs : optional keyword arguments\n Are passed to :func:`~scanpy.api.score_genes`. `ctrl_size` is not\n possible, as it's set as `min(len(s_genes), len(g2m_genes))`.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with the following fields.\n\n S_score : `adata.obs`, dtype `object`\n The score for S phase for each cell.\n G2M_score : `adata.obs`, dtype `object`\n The score for G2M phase for each cell.\n phase : `adata.obs`, dtype `object`\n The cell cycle phase (`S`, `G2M` or `G1`) for each cell.\n\n See also\n --------\n score_genes\n\n Examples\n --------\n See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.\n \"\"\"\n logg.info('calculating cell cycle phase')\n\n adata = adata.copy() if copy else adata\n ctrl_size = min(len(s_genes), len(g2m_genes))\n # add s-score\n score_genes(adata, gene_list=s_genes, score_name='S_score', ctrl_size=ctrl_size, **kwargs)\n # add g2m-score\n score_genes(adata, gene_list=g2m_genes, score_name='G2M_score', ctrl_size=ctrl_size, **kwargs)\n scores = adata.obs[['S_score', 'G2M_score']]\n\n # default phase is S\n phase = pd.Series('S', index=scores.index)\n\n # if G2M is higher than S, it's G2M\n phase[scores.G2M_score > scores.S_score] = 'G2M'\n\n # if all scores are negative, it's G1...\n phase[np.all(scores < 0, axis=1)] = 'G1'\n\n adata.obs['phase'] = phase\n logg.hint(' \\'phase\\', cell cycle phase (adata.obs)')\n return adata if copy else None\n", "path": "scanpy/tools/score_genes.py"}], "after_files": [{"content": "\"\"\"Calculate scores based on the expression of gene lists.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse\nfrom .. import settings\nfrom .. import logging as logg\n\n\ndef score_genes(\n adata,\n gene_list,\n ctrl_size=50,\n gene_pool=None,\n n_bins=25,\n score_name='score',\n random_state=0,\n copy=False): # we use the scikit-learn convention of calling the seed \"random_state\"\n \"\"\"Score a set of genes [Satija15]_.\n\n The score is the average expression of a set of genes subtracted with the\n average expression of a reference set of genes. The reference set is\n randomly sampled from the `gene_pool` for each binned expression value.\n\n This reproduces the approach in Seurat [Satija15]_ and has been implemented\n for Scanpy by Davide Cittaro.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n The annotated data matrix.\n gene_list : iterable\n The list of gene names used for score calculation.\n ctrl_size : `int`, optional (default: 50)\n Number of reference genes to be sampled. If `len(gene_list)` is not too\n low, you can set `ctrl_size=len(gene_list)`.\n gene_pool : `list` or `None`, optional (default: `None`)\n Genes for sampling the reference set. 
Default is all genes.\n n_bins : `int`, optional (default: 25)\n Number of expression level bins for sampling.\n score_name : `str`, optional (default: `'score'`)\n Name of the field to be added in `.obs`.\n random_state : `int`, optional (default: 0)\n The random seed for sampling.\n copy : `bool`, optional (default: `False`)\n Copy `adata` or modify it inplace.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with an additional field\n `score_name`.\n\n Examples\n --------\n See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.\n \"\"\"\n logg.info('computing score \\'{}\\''.format(score_name), r=True)\n adata = adata.copy() if copy else adata\n\n if random_state:\n np.random.seed(random_state)\n\n gene_list = set([x for x in gene_list if x in adata.var_names])\n\n if not gene_pool:\n gene_pool = list(adata.var_names)\n else:\n gene_pool = [x for x in gene_pool if x in adata.var_names]\n\n # Trying here to match the Seurat approach in scoring cells.\n # Basically we need to compare genes against random genes in a matched\n # interval of expression.\n\n # TODO: this densifies the whole data matrix for `gene_pool`\n if scipy.sparse.issparse(adata.X):\n obs_avg = pd.Series(\n np.nanmean(\n adata[:, gene_pool].X.toarray(), axis=0), index=gene_pool) # average expression of genes\n else:\n obs_avg = pd.Series(\n np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes\n\n obs_avg = obs_avg[np.isfinite(obs_avg)] # Sometimes (and I don't know how) missing data may be there, with nansfor\n gene_pool = list(obs_avg.index)\n\n n_items = int(np.round(len(obs_avg) / (n_bins - 1)))\n obs_cut = obs_avg.rank(method='min') // n_items\n control_genes = set()\n\n # now pick `ctrl_size` genes from every cut\n for cut in np.unique(obs_cut.loc[gene_list]):\n r_genes = np.array(obs_cut[obs_cut == cut].index)\n np.random.shuffle(r_genes)\n control_genes.update(set(r_genes[:ctrl_size])) # uses full r_genes if ctrl_size > len(r_genes)\n\n # To index, we need a list - indexing implies an order.\n control_genes = list(control_genes - gene_list)\n gene_list = list(gene_list)\n\n if len(gene_list) == 0:\n # We shouldn't even get here, but just in case\n logg.hint('could not add \\n'\n ' \\'{}\\', score of gene set (adata.obs)'.format(score_name))\n return adata if copy else None\n elif len(gene_list) == 1:\n score = np.nanmean(adata[:, gene_list].X) - np.nanmean(adata[:, control_genes].X)\n else:\n score = np.nanmean(adata[:, gene_list].X, axis=1) - np.nanmean(adata[:, control_genes].X, axis=1)\n\n adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)\n\n logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\\n')\n logg.hint('added\\n'\n ' \\'{}\\', score of gene set (adata.obs)'.format(score_name))\n return adata if copy else None\n\n\ndef score_genes_cell_cycle(\n adata,\n s_genes,\n g2m_genes,\n copy=False,\n **kwargs):\n \"\"\"Score cell cycle genes [Satija15]_.\n\n Given two lists of genes associated to S phase and G2M phase, calculates\n scores and assigns a cell cycle phase (G1, S or G2M). 
See\n :func:`~scanpy.api.score_genes` for more explanation.\n\n Parameters\n ----------\n adata : :class:`~scanpy.api.AnnData`\n The annotated data matrix.\n s_genes : `list`\n List of genes associated with S phase.\n g2m_genes : `list`\n List of genes associated with G2M phase.\n copy : `bool`, optional (default: `False`)\n Copy `adata` or modify it inplace.\n **kwargs : optional keyword arguments\n Are passed to :func:`~scanpy.api.score_genes`. `ctrl_size` is not\n possible, as it's set as `min(len(s_genes), len(g2m_genes))`.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with the following fields.\n\n S_score : `adata.obs`, dtype `object`\n The score for S phase for each cell.\n G2M_score : `adata.obs`, dtype `object`\n The score for G2M phase for each cell.\n phase : `adata.obs`, dtype `object`\n The cell cycle phase (`S`, `G2M` or `G1`) for each cell.\n\n See also\n --------\n score_genes\n\n Examples\n --------\n See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.\n \"\"\"\n logg.info('calculating cell cycle phase')\n\n adata = adata.copy() if copy else adata\n ctrl_size = min(len(s_genes), len(g2m_genes))\n # add s-score\n score_genes(adata, gene_list=s_genes, score_name='S_score', ctrl_size=ctrl_size, **kwargs)\n # add g2m-score\n score_genes(adata, gene_list=g2m_genes, score_name='G2M_score', ctrl_size=ctrl_size, **kwargs)\n scores = adata.obs[['S_score', 'G2M_score']]\n\n # default phase is S\n phase = pd.Series('S', index=scores.index)\n\n # if G2M is higher than S, it's G2M\n phase[scores.G2M_score > scores.S_score] = 'G2M'\n\n # if all scores are negative, it's G1...\n phase[np.all(scores < 0, axis=1)] = 'G1'\n\n adata.obs['phase'] = phase\n logg.hint(' \\'phase\\', cell cycle phase (adata.obs)')\n return adata if copy else None\n", "path": "scanpy/tools/score_genes.py"}]} | 2,858 | 442 |
gh_patches_debug_8635 | rasdani/github-patches | git_diff | sktime__sktime-2607 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] UnobservedComponents throws error during predict() call, when wrapped under TransformedTargetForecaster and ForecastingPipeline
**Describe the bug**
> Initially identified issue:
_New version of sktime throws error for UnobservedComponents when wrapped under ForecastingGridSearchCV with TransformedTargetForecaster pipe._
> Updated description:
From sktime v.0.11.1 onwards until the current version of main, UnobservedComponents throws an error during the predict() call when it is piped under TransformedTargetForecaster(). 
_The same construct works with all the other models I could test, for example ARIMA, AutoETS, etc. So the issue is in how TransformedTargetForecaster calls predict and how the same call cascades to the UnobservedComponents class._
**To Reproduce**
The code example was taken from the advanced example in the ForecastingGridSearchCV documentation. The only thing changed was calling UnobservedComponents instead of ExponentialSmoothing at the end of the param_grid argument in gscv.
<!--
Add a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve
If the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com
-->
```python
from sktime.datasets import load_shampoo_sales
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.model_selection import ExpandingWindowSplitter
from sktime.forecasting.model_selection import ForecastingGridSearchCV
from sktime.forecasting.compose import TransformedTargetForecaster
from sktime.forecasting.theta import ThetaForecaster
from sktime.transformations.series.impute import Imputer
from sktime.forecasting.structural import UnobservedComponents
y = load_shampoo_sales()
fh = [1,2,3]
pipe = TransformedTargetForecaster(steps=[
("imputer", Imputer()),
("forecaster", UnobservedComponents())])
cv = ExpandingWindowSplitter(
initial_window=24,
step_length=12,
start_with_window=True,
fh=[1,2,3])
gscv = ForecastingGridSearchCV(
forecaster=pipe,
param_grid=[{
"forecaster": [NaiveForecaster(sp=12)],
"forecaster__strategy": ["drift", "last", "mean"],
},
{
"imputer__method": ["mean", "drift"],
"forecaster": [ThetaForecaster(sp=12)],
},
{
"imputer__method": ["mean", "last"],
"forecaster": [UnobservedComponents()],
"forecaster__seasonal": [12],
},
],
cv=cv,
n_jobs=-1)
gscv.fit(y)
y_pred = gscv.predict(fh)
```
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Additional context**
Error report:
```python
TypeError Traceback (most recent call last)
~/work/chronos/chronos/pipeline/tests/test_pipeline.py in <module>
37 cv=cv,
38 n_jobs=-1)
---> 39 gscv.fit(y)
40
41 y_pred = gscv.predict(fh)
~/work/sktime/sktime/forecasting/base/_base.py in fit(self, y, X, fh)
262 # we call the ordinary _fit if no looping/vectorization needed
263 if not vectorization_needed:
--> 264 self._fit(y=y_inner, X=X_inner, fh=fh)
265 else:
266 # otherwise we call the vectorized version of fit
~/work/sktime/sktime/forecasting/model_selection/_tune.py in _fit(self, y, X, fh)
279
280 # Run grid-search cross-validation.
--> 281 results = self._run_search(evaluate_candidates)
282
283 results = pd.DataFrame(results)
~/work/sktime/sktime/forecasting/model_selection/_tune.py in _run_search(self, evaluate_candidates)
487 """Search all candidates in param_grid."""
488 _check_param_grid(self.param_grid)
--> 489 return evaluate_candidates(ParameterGrid(self.param_grid))
490
491 @classmethod
~/work/sktime/sktime/forecasting/model_selection/_tune.py in evaluate_candidates(candidate_params)
266
267 out = parallel(
--> 268 delayed(_fit_and_score)(params) for params in candidate_params
269 )
270
~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1054
1055 with self._backend.retrieval_context():
-> 1056 self.retrieve()
1057 # Make sure that we get a last message telling us we are done
1058 elapsed_time = time.time() - self._start_time
~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/parallel.py in retrieve(self)
933 try:
934 if getattr(self._backend, 'supports_timeout', False):
--> 935 self._output.extend(job.get(timeout=self.timeout))
936 else:
937 self._output.extend(job.get())
~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/_parallel_backends.py in wrap_future_result(future, timeout)
540 AsyncResults.get from multiprocessing."""
541 try:
--> 542 return future.result(timeout=timeout)
543 except CfTimeoutError as e:
544 raise TimeoutError from e
~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
433 raise CancelledError()
434 elif self._state == FINISHED:
--> 435 return self.__get_result()
436 else:
437 raise TimeoutError()
~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
TypeError: No valid mtype could be identified
```
<!--
Add any other context about the problem here.
-->
**Versions**
System:
python: 3.7.13 (default, Mar 28 2022, 07:24:34) [Clang 12.0.0 ]
executable: .../miniconda3/envs/chronos_dev2/bin/python
machine: Darwin-21.4.0-x86_64-i386-64bit
Python dependencies:
pip: 21.2.2
setuptools: 58.0.4
sklearn: 1.0.2
sktime: 0.11.3
statsmodels: 0.12.1
numpy: 1.21.5
scipy: 1.7.3
pandas: 1.3.5
matplotlib: 3.5.1
joblib: 1.1.0
numba: 0.55.1
pmdarima: 1.8.5
tsfresh: None
<!--
Please run the following code snippet and paste the output here:
from sktime import show_versions; show_versions()
-->
</details>
<!-- Thanks for contributing! -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/datatypes/_series/_convert.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Machine type converters for Series scitype.
3
4 Exports conversion and mtype dictionary for Series scitype:
5
6 convert_dict: dict indexed by triples of str
7 1st element = convert from - str
8 2nd element = convert to - str
9 3rd element = considered as this scitype - str
10 elements are conversion functions of machine type (1st) -> 2nd
11
12 Function signature of all elements
13 convert_dict[(from_type, to_type, as_scitype)]
14
15 Parameters
16 ----------
17 obj : from_type - object to convert
18 store : dictionary - reference of storage for lossy conversions, default=None (no store)
19
20 Returns
21 -------
22 converted_obj : to_type - object obj converted to to_type
23
24 Raises
25 ------
26 ValueError and TypeError, if requested conversion is not possible
27 (depending on conversion logic)
28 """
29
30 __author__ = ["fkiraly"]
31
32 __all__ = ["convert_dict"]
33
34 import numpy as np
35 import pandas as pd
36
37 ##############################################################
38 # methods to convert one machine type to another machine type
39 ##############################################################
40
41 convert_dict = dict()
42
43
44 def convert_identity(obj, store=None):
45
46 return obj
47
48
49 # assign identity function to type conversion to self
50 for tp in ["pd.Series", "pd.DataFrame", "np.ndarray"]:
51 convert_dict[(tp, tp, "Series")] = convert_identity
52
53
54 def convert_UvS_to_MvS_as_Series(obj: pd.Series, store=None) -> pd.DataFrame:
55
56 if not isinstance(obj, pd.Series):
57 raise TypeError("input must be a pd.Series")
58
59 if (
60 isinstance(store, dict)
61 and "columns" in store.keys()
62 and len(store["columns"]) == 1
63 ):
64 res = pd.DataFrame(obj, columns=store["columns"])
65 else:
66 res = pd.DataFrame(obj)
67
68 return res
69
70
71 convert_dict[("pd.Series", "pd.DataFrame", "Series")] = convert_UvS_to_MvS_as_Series
72
73
74 def convert_MvS_to_UvS_as_Series(obj: pd.DataFrame, store=None) -> pd.Series:
75
76 if not isinstance(obj, pd.DataFrame):
77 raise TypeError("input is not a pd.DataFrame")
78
79 if len(obj.columns) != 1:
80 raise ValueError("input must be univariate pd.DataFrame, with one column")
81
82 if isinstance(store, dict):
83 store["columns"] = obj.columns[[0]]
84
85 y = obj[obj.columns[0]]
86 y.name = None
87
88 return y
89
90
91 convert_dict[("pd.DataFrame", "pd.Series", "Series")] = convert_MvS_to_UvS_as_Series
92
93
94 def convert_MvS_to_np_as_Series(obj: pd.DataFrame, store=None) -> np.ndarray:
95
96 if not isinstance(obj, pd.DataFrame):
97 raise TypeError("input must be a pd.DataFrame")
98
99 if isinstance(store, dict):
100 store["columns"] = obj.columns
101 store["index"] = obj.index
102
103 return obj.to_numpy()
104
105
106 convert_dict[("pd.DataFrame", "np.ndarray", "Series")] = convert_MvS_to_np_as_Series
107
108
109 def convert_UvS_to_np_as_Series(obj: pd.Series, store=None) -> np.ndarray:
110
111 if not isinstance(obj, pd.Series):
112 raise TypeError("input must be a pd.Series")
113
114 if isinstance(store, dict):
115 store["index"] = obj.index
116
117 return pd.DataFrame(obj).to_numpy()
118
119
120 convert_dict[("pd.Series", "np.ndarray", "Series")] = convert_UvS_to_np_as_Series
121
122
123 def convert_np_to_MvS_as_Series(obj: np.ndarray, store=None) -> pd.DataFrame:
124
125 if not isinstance(obj, np.ndarray) and len(obj.shape) > 2:
126 raise TypeError("input must be a np.ndarray of dim 1 or 2")
127
128 if len(obj.shape) == 1:
129 obj = np.reshape(obj, (-1, 1))
130
131 res = pd.DataFrame(obj)
132
133 # add column names or index from store if stored and length fits
134 if (
135 isinstance(store, dict)
136 and "columns" in store.keys()
137 and len(store["columns"]) == obj.shape[1]
138 ):
139 res.columns = store["columns"]
140 if (
141 isinstance(store, dict)
142 and "index" in store.keys()
143 and len(store["index"]) == obj.shape[0]
144 ):
145 res.index = store["index"]
146
147 return res
148
149
150 convert_dict[("np.ndarray", "pd.DataFrame", "Series")] = convert_np_to_MvS_as_Series
151
152
153 def convert_np_to_UvS_as_Series(obj: np.ndarray, store=None) -> pd.Series:
154
155 if not isinstance(obj, np.ndarray) or obj.ndim > 2:
156 raise TypeError("input must be a one-column np.ndarray of dim 1 or 2")
157
158 if obj.ndim == 2 and obj.shape[1] != 1:
159 raise TypeError("input must be a one-column np.ndarray of dim 1 or 2")
160
161 res = pd.Series(obj.flatten())
162
163 # add index from store if stored and length fits
164 if (
165 isinstance(store, dict)
166 and "index" in store.keys()
167 and len(store["index"]) == obj.shape[0]
168 ):
169 res.index = store["index"]
170
171 return res
172
173
174 convert_dict[("np.ndarray", "pd.Series", "Series")] = convert_np_to_UvS_as_Series
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/datatypes/_series/_convert.py b/sktime/datatypes/_series/_convert.py
--- a/sktime/datatypes/_series/_convert.py
+++ b/sktime/datatypes/_series/_convert.py
@@ -56,14 +56,14 @@
if not isinstance(obj, pd.Series):
raise TypeError("input must be a pd.Series")
+ res = pd.DataFrame(obj)
+
if (
isinstance(store, dict)
and "columns" in store.keys()
and len(store["columns"]) == 1
):
- res = pd.DataFrame(obj, columns=store["columns"])
- else:
- res = pd.DataFrame(obj)
+ res.columns = store["columns"]
return res
| {"golden_diff": "diff --git a/sktime/datatypes/_series/_convert.py b/sktime/datatypes/_series/_convert.py\n--- a/sktime/datatypes/_series/_convert.py\n+++ b/sktime/datatypes/_series/_convert.py\n@@ -56,14 +56,14 @@\n if not isinstance(obj, pd.Series):\n raise TypeError(\"input must be a pd.Series\")\n \n+ res = pd.DataFrame(obj)\n+\n if (\n isinstance(store, dict)\n and \"columns\" in store.keys()\n and len(store[\"columns\"]) == 1\n ):\n- res = pd.DataFrame(obj, columns=store[\"columns\"])\n- else:\n- res = pd.DataFrame(obj)\n+ res.columns = store[\"columns\"]\n \n return res\n", "issue": "[BUG] UnobservedComponents throws error during predict() call, when wrapped under TransformedTargetForecaster and ForecastingPipeline\n**Describe the bug**\r\n\r\n> Initially identified issue:\r\n\r\n_New version of sktime throws error for UnobservedComponents when wrapped under ForecastingGridSearchCV with TransformedTargetForecaster pipe._\r\n\r\n> Updated description:\r\n\r\nFrom sktime v.0.11.1 onwards untill the current version of main, UnobservedComponents throws error during Predict method call when it is piped under TransformedTargetForecaster(). \r\n_The same construct works with all the other models which I could test for example ARIMA, AutoETS etc. So the issue is how TransformedTargetForecaster calls predict and the cascading of the same method to UnobservedComponents class._\r\n\r\n**To Reproduce**\r\nThe code example was taken from documentation of ForecastingGridSearchCV for advanced example. The only thing changed was calling UnobservedComponents instead of ExponentialSmoothing at the end of param_grid argument in gscv.\r\n<!--\r\nAdd a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve\r\n\r\nIf the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com\r\n-->\r\n\r\n```python\r\nfrom sktime.datasets import load_shampoo_sales\r\nfrom sktime.forecasting.exp_smoothing import ExponentialSmoothing\r\nfrom sktime.forecasting.naive import NaiveForecaster\r\nfrom sktime.forecasting.model_selection import ExpandingWindowSplitter\r\nfrom sktime.forecasting.model_selection import ForecastingGridSearchCV\r\nfrom sktime.forecasting.compose import TransformedTargetForecaster\r\nfrom sktime.forecasting.theta import ThetaForecaster\r\nfrom sktime.transformations.series.impute import Imputer\r\nfrom sktime.forecasting.structural import UnobservedComponents\r\n\r\ny = load_shampoo_sales()\r\nfh = [1,2,3]\r\npipe = TransformedTargetForecaster(steps=[\r\n (\"imputer\", Imputer()),\r\n (\"forecaster\", UnobservedComponents())])\r\ncv = ExpandingWindowSplitter(\r\n initial_window=24,\r\n step_length=12,\r\n start_with_window=True,\r\n fh=[1,2,3])\r\ngscv = ForecastingGridSearchCV(\r\n forecaster=pipe,\r\n param_grid=[{\r\n \"forecaster\": [NaiveForecaster(sp=12)],\r\n \"forecaster__strategy\": [\"drift\", \"last\", \"mean\"],\r\n },\r\n {\r\n \"imputer__method\": [\"mean\", \"drift\"],\r\n \"forecaster\": [ThetaForecaster(sp=12)],\r\n },\r\n {\r\n \"imputer__method\": [\"mean\", \"last\"],\r\n \"forecaster\": [UnobservedComponents()],\r\n \"forecaster__seasonal\": [12],\r\n },\r\n ],\r\n cv=cv,\r\n n_jobs=-1)\r\ngscv.fit(y)\r\n\r\ny_pred = gscv.predict(fh)\r\n```\r\n\r\n**Expected behavior**\r\n<!--\r\nA clear and concise description of what you expected to happen.\r\n-->\r\n\r\n**Additional context**\r\nError report:\r\n```python\r\nTypeError Traceback (most recent call 
last)\r\n~/work/chronos/chronos/pipeline/tests/test_pipeline.py in <module>\r\n 37 cv=cv,\r\n 38 n_jobs=-1)\r\n---> 39 gscv.fit(y)\r\n 40 \r\n 41 y_pred = gscv.predict(fh)\r\n\r\n~/work/sktime/sktime/forecasting/base/_base.py in fit(self, y, X, fh)\r\n 262 # we call the ordinary _fit if no looping/vectorization needed\r\n 263 if not vectorization_needed:\r\n--> 264 self._fit(y=y_inner, X=X_inner, fh=fh)\r\n 265 else:\r\n 266 # otherwise we call the vectorized version of fit\r\n\r\n~/work/sktime/sktime/forecasting/model_selection/_tune.py in _fit(self, y, X, fh)\r\n 279 \r\n 280 # Run grid-search cross-validation.\r\n--> 281 results = self._run_search(evaluate_candidates)\r\n 282 \r\n 283 results = pd.DataFrame(results)\r\n\r\n~/work/sktime/sktime/forecasting/model_selection/_tune.py in _run_search(self, evaluate_candidates)\r\n 487 \"\"\"Search all candidates in param_grid.\"\"\"\r\n 488 _check_param_grid(self.param_grid)\r\n--> 489 return evaluate_candidates(ParameterGrid(self.param_grid))\r\n 490 \r\n 491 @classmethod\r\n\r\n~/work/sktime/sktime/forecasting/model_selection/_tune.py in evaluate_candidates(candidate_params)\r\n 266 \r\n 267 out = parallel(\r\n--> 268 delayed(_fit_and_score)(params) for params in candidate_params\r\n 269 )\r\n 270 \r\n\r\n~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)\r\n 1054 \r\n 1055 with self._backend.retrieval_context():\r\n-> 1056 self.retrieve()\r\n 1057 # Make sure that we get a last message telling us we are done\r\n 1058 elapsed_time = time.time() - self._start_time\r\n\r\n~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/parallel.py in retrieve(self)\r\n 933 try:\r\n 934 if getattr(self._backend, 'supports_timeout', False):\r\n--> 935 self._output.extend(job.get(timeout=self.timeout))\r\n 936 else:\r\n 937 self._output.extend(job.get())\r\n\r\n~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/site-packages/joblib/_parallel_backends.py in wrap_future_result(future, timeout)\r\n 540 AsyncResults.get from multiprocessing.\"\"\"\r\n 541 try:\r\n--> 542 return future.result(timeout=timeout)\r\n 543 except CfTimeoutError as e:\r\n 544 raise TimeoutError from e\r\n\r\n~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)\r\n 433 raise CancelledError()\r\n 434 elif self._state == FINISHED:\r\n--> 435 return self.__get_result()\r\n 436 else:\r\n 437 raise TimeoutError()\r\n\r\n~/opt/miniconda3/envs/chronos_dev2/lib/python3.7/concurrent/futures/_base.py in __get_result(self)\r\n 382 def __get_result(self):\r\n 383 if self._exception:\r\n--> 384 raise self._exception\r\n 385 else:\r\n 386 return self._result\r\n\r\nTypeError: No valid mtype could be identified\r\n```\r\n<!--\r\nAdd any other context about the problem here.\r\n-->\r\n\r\n**Versions**\r\nSystem:\r\n python: 3.7.13 (default, Mar 28 2022, 07:24:34) [Clang 12.0.0 ]\r\nexecutable: .../miniconda3/envs/chronos_dev2/bin/python\r\n machine: Darwin-21.4.0-x86_64-i386-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2.2\r\n setuptools: 58.0.4\r\n sklearn: 1.0.2\r\n sktime: 0.11.3\r\n statsmodels: 0.12.1\r\n numpy: 1.21.5\r\n scipy: 1.7.3\r\n pandas: 1.3.5\r\n matplotlib: 3.5.1\r\n joblib: 1.1.0\r\n numba: 0.55.1\r\n pmdarima: 1.8.5\r\n tsfresh: None\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n \r\nfrom sktime import show_versions; show_versions()\r\n-->\r\n\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! 
-->\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Machine type converters for Series scitype.\n\nExports conversion and mtype dictionary for Series scitype:\n\nconvert_dict: dict indexed by triples of str\n 1st element = convert from - str\n 2nd element = convert to - str\n 3rd element = considered as this scitype - str\nelements are conversion functions of machine type (1st) -> 2nd\n\nFunction signature of all elements\nconvert_dict[(from_type, to_type, as_scitype)]\n\nParameters\n----------\nobj : from_type - object to convert\nstore : dictionary - reference of storage for lossy conversions, default=None (no store)\n\nReturns\n-------\nconverted_obj : to_type - object obj converted to to_type\n\nRaises\n------\nValueError and TypeError, if requested conversion is not possible\n (depending on conversion logic)\n\"\"\"\n\n__author__ = [\"fkiraly\"]\n\n__all__ = [\"convert_dict\"]\n\nimport numpy as np\nimport pandas as pd\n\n##############################################################\n# methods to convert one machine type to another machine type\n##############################################################\n\nconvert_dict = dict()\n\n\ndef convert_identity(obj, store=None):\n\n return obj\n\n\n# assign identity function to type conversion to self\nfor tp in [\"pd.Series\", \"pd.DataFrame\", \"np.ndarray\"]:\n convert_dict[(tp, tp, \"Series\")] = convert_identity\n\n\ndef convert_UvS_to_MvS_as_Series(obj: pd.Series, store=None) -> pd.DataFrame:\n\n if not isinstance(obj, pd.Series):\n raise TypeError(\"input must be a pd.Series\")\n\n if (\n isinstance(store, dict)\n and \"columns\" in store.keys()\n and len(store[\"columns\"]) == 1\n ):\n res = pd.DataFrame(obj, columns=store[\"columns\"])\n else:\n res = pd.DataFrame(obj)\n\n return res\n\n\nconvert_dict[(\"pd.Series\", \"pd.DataFrame\", \"Series\")] = convert_UvS_to_MvS_as_Series\n\n\ndef convert_MvS_to_UvS_as_Series(obj: pd.DataFrame, store=None) -> pd.Series:\n\n if not isinstance(obj, pd.DataFrame):\n raise TypeError(\"input is not a pd.DataFrame\")\n\n if len(obj.columns) != 1:\n raise ValueError(\"input must be univariate pd.DataFrame, with one column\")\n\n if isinstance(store, dict):\n store[\"columns\"] = obj.columns[[0]]\n\n y = obj[obj.columns[0]]\n y.name = None\n\n return y\n\n\nconvert_dict[(\"pd.DataFrame\", \"pd.Series\", \"Series\")] = convert_MvS_to_UvS_as_Series\n\n\ndef convert_MvS_to_np_as_Series(obj: pd.DataFrame, store=None) -> np.ndarray:\n\n if not isinstance(obj, pd.DataFrame):\n raise TypeError(\"input must be a pd.DataFrame\")\n\n if isinstance(store, dict):\n store[\"columns\"] = obj.columns\n store[\"index\"] = obj.index\n\n return obj.to_numpy()\n\n\nconvert_dict[(\"pd.DataFrame\", \"np.ndarray\", \"Series\")] = convert_MvS_to_np_as_Series\n\n\ndef convert_UvS_to_np_as_Series(obj: pd.Series, store=None) -> np.ndarray:\n\n if not isinstance(obj, pd.Series):\n raise TypeError(\"input must be a pd.Series\")\n\n if isinstance(store, dict):\n store[\"index\"] = obj.index\n\n return pd.DataFrame(obj).to_numpy()\n\n\nconvert_dict[(\"pd.Series\", \"np.ndarray\", \"Series\")] = convert_UvS_to_np_as_Series\n\n\ndef convert_np_to_MvS_as_Series(obj: np.ndarray, store=None) -> pd.DataFrame:\n\n if not isinstance(obj, np.ndarray) and len(obj.shape) > 2:\n raise TypeError(\"input must be a np.ndarray of dim 1 or 2\")\n\n if len(obj.shape) == 1:\n obj = np.reshape(obj, (-1, 1))\n\n res = pd.DataFrame(obj)\n\n # add column names or index from store if stored and length fits\n if (\n 
isinstance(store, dict)\n and \"columns\" in store.keys()\n and len(store[\"columns\"]) == obj.shape[1]\n ):\n res.columns = store[\"columns\"]\n if (\n isinstance(store, dict)\n and \"index\" in store.keys()\n and len(store[\"index\"]) == obj.shape[0]\n ):\n res.index = store[\"index\"]\n\n return res\n\n\nconvert_dict[(\"np.ndarray\", \"pd.DataFrame\", \"Series\")] = convert_np_to_MvS_as_Series\n\n\ndef convert_np_to_UvS_as_Series(obj: np.ndarray, store=None) -> pd.Series:\n\n if not isinstance(obj, np.ndarray) or obj.ndim > 2:\n raise TypeError(\"input must be a one-column np.ndarray of dim 1 or 2\")\n\n if obj.ndim == 2 and obj.shape[1] != 1:\n raise TypeError(\"input must be a one-column np.ndarray of dim 1 or 2\")\n\n res = pd.Series(obj.flatten())\n\n # add index from store if stored and length fits\n if (\n isinstance(store, dict)\n and \"index\" in store.keys()\n and len(store[\"index\"]) == obj.shape[0]\n ):\n res.index = store[\"index\"]\n\n return res\n\n\nconvert_dict[(\"np.ndarray\", \"pd.Series\", \"Series\")] = convert_np_to_UvS_as_Series\n", "path": "sktime/datatypes/_series/_convert.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Machine type converters for Series scitype.\n\nExports conversion and mtype dictionary for Series scitype:\n\nconvert_dict: dict indexed by triples of str\n 1st element = convert from - str\n 2nd element = convert to - str\n 3rd element = considered as this scitype - str\nelements are conversion functions of machine type (1st) -> 2nd\n\nFunction signature of all elements\nconvert_dict[(from_type, to_type, as_scitype)]\n\nParameters\n----------\nobj : from_type - object to convert\nstore : dictionary - reference of storage for lossy conversions, default=None (no store)\n\nReturns\n-------\nconverted_obj : to_type - object obj converted to to_type\n\nRaises\n------\nValueError and TypeError, if requested conversion is not possible\n (depending on conversion logic)\n\"\"\"\n\n__author__ = [\"fkiraly\"]\n\n__all__ = [\"convert_dict\"]\n\nimport numpy as np\nimport pandas as pd\n\n##############################################################\n# methods to convert one machine type to another machine type\n##############################################################\n\nconvert_dict = dict()\n\n\ndef convert_identity(obj, store=None):\n\n return obj\n\n\n# assign identity function to type conversion to self\nfor tp in [\"pd.Series\", \"pd.DataFrame\", \"np.ndarray\"]:\n convert_dict[(tp, tp, \"Series\")] = convert_identity\n\n\ndef convert_UvS_to_MvS_as_Series(obj: pd.Series, store=None) -> pd.DataFrame:\n\n if not isinstance(obj, pd.Series):\n raise TypeError(\"input must be a pd.Series\")\n\n res = pd.DataFrame(obj)\n\n if (\n isinstance(store, dict)\n and \"columns\" in store.keys()\n and len(store[\"columns\"]) == 1\n ):\n res.columns = store[\"columns\"]\n\n return res\n\n\nconvert_dict[(\"pd.Series\", \"pd.DataFrame\", \"Series\")] = convert_UvS_to_MvS_as_Series\n\n\ndef convert_MvS_to_UvS_as_Series(obj: pd.DataFrame, store=None) -> pd.Series:\n\n if not isinstance(obj, pd.DataFrame):\n raise TypeError(\"input is not a pd.DataFrame\")\n\n if len(obj.columns) != 1:\n raise ValueError(\"input must be univariate pd.DataFrame, with one column\")\n\n if isinstance(store, dict):\n store[\"columns\"] = obj.columns[[0]]\n\n y = obj[obj.columns[0]]\n y.name = None\n\n return y\n\n\nconvert_dict[(\"pd.DataFrame\", \"pd.Series\", \"Series\")] = convert_MvS_to_UvS_as_Series\n\n\ndef convert_MvS_to_np_as_Series(obj: pd.DataFrame, 
store=None) -> np.ndarray:\n\n if not isinstance(obj, pd.DataFrame):\n raise TypeError(\"input must be a pd.DataFrame\")\n\n if isinstance(store, dict):\n store[\"columns\"] = obj.columns\n store[\"index\"] = obj.index\n\n return obj.to_numpy()\n\n\nconvert_dict[(\"pd.DataFrame\", \"np.ndarray\", \"Series\")] = convert_MvS_to_np_as_Series\n\n\ndef convert_UvS_to_np_as_Series(obj: pd.Series, store=None) -> np.ndarray:\n\n if not isinstance(obj, pd.Series):\n raise TypeError(\"input must be a pd.Series\")\n\n if isinstance(store, dict):\n store[\"index\"] = obj.index\n\n return pd.DataFrame(obj).to_numpy()\n\n\nconvert_dict[(\"pd.Series\", \"np.ndarray\", \"Series\")] = convert_UvS_to_np_as_Series\n\n\ndef convert_np_to_MvS_as_Series(obj: np.ndarray, store=None) -> pd.DataFrame:\n\n if not isinstance(obj, np.ndarray) and len(obj.shape) > 2:\n raise TypeError(\"input must be a np.ndarray of dim 1 or 2\")\n\n if len(obj.shape) == 1:\n obj = np.reshape(obj, (-1, 1))\n\n res = pd.DataFrame(obj)\n\n # add column names or index from store if stored and length fits\n if (\n isinstance(store, dict)\n and \"columns\" in store.keys()\n and len(store[\"columns\"]) == obj.shape[1]\n ):\n res.columns = store[\"columns\"]\n if (\n isinstance(store, dict)\n and \"index\" in store.keys()\n and len(store[\"index\"]) == obj.shape[0]\n ):\n res.index = store[\"index\"]\n\n return res\n\n\nconvert_dict[(\"np.ndarray\", \"pd.DataFrame\", \"Series\")] = convert_np_to_MvS_as_Series\n\n\ndef convert_np_to_UvS_as_Series(obj: np.ndarray, store=None) -> pd.Series:\n\n if not isinstance(obj, np.ndarray) or obj.ndim > 2:\n raise TypeError(\"input must be a one-column np.ndarray of dim 1 or 2\")\n\n if obj.ndim == 2 and obj.shape[1] != 1:\n raise TypeError(\"input must be a one-column np.ndarray of dim 1 or 2\")\n\n res = pd.Series(obj.flatten())\n\n # add index from store if stored and length fits\n if (\n isinstance(store, dict)\n and \"index\" in store.keys()\n and len(store[\"index\"]) == obj.shape[0]\n ):\n res.index = store[\"index\"]\n\n return res\n\n\nconvert_dict[(\"np.ndarray\", \"pd.Series\", \"Series\")] = convert_np_to_UvS_as_Series\n", "path": "sktime/datatypes/_series/_convert.py"}]} | 3,726 | 167 |
gh_patches_debug_30243 | rasdani/github-patches | git_diff | lhotse-speech__lhotse-847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Whisper workflow supervision `end` may be incorrect
So I ran the model on one of the AMI headset recordings (~5000s) and it seems like the "end" actually shows the end of the segment, not the duration. Here is the JSON containing the `results["segments"]`: https://drive.google.com/file/d/169igkcDY2SmMs5k3hOhHip89T4MQDnKs/view?usp=sharing
_Originally posted by @desh2608 in https://github.com/lhotse-speech/lhotse/pull/834#discussion_r988376898_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lhotse/workflows/whisper.py`
Content:
```
1 import logging
2 from typing import Any, Generator, List, Optional, Union
3
4 import torch
5
6 from lhotse import CutSet, MonoCut, Recording, RecordingSet, SupervisionSegment
7 from lhotse.qa import trim_supervisions_to_recordings
8 from lhotse.utils import fastcopy, is_module_available
9
10
11 def annotate_with_whisper(
12 manifest: Union[RecordingSet, CutSet],
13 language: Optional[str] = None,
14 model_name: str = "base",
15 device: str = "cpu",
16 ) -> Generator[MonoCut, None, None]:
17 """
18 Use OpenAI Whisper model to annotate either RECORDINGS_MANIFEST, RECORDINGS_DIR, or CUTS_MANIFEST.
19 It will perform automatic segmentation, transcription, and language identification. If
20 the first argument is a CutSet, it will overwrite the supervisions with the results of the inference.
21
22 Note: this is an experimental feature of Lhotse, and is not guaranteed to yield
23 high quality of data.
24
25 See the original repo for more details: https://github.com/openai/whisper
26
27 :param manifest: a ``RecordingSet`` or ``CutSet`` object.
28 :param language: specify the language if known upfront, otherwise it will be auto-detected.
29 :param model_name: one of available Whisper variants (base, medium, large, etc.).
30 :param device: Where to run the inference (cpu, cuda, etc.).
31 :return: a generator of cuts (use ``CutSet.open_writer()`` to write them).
32 """
33 assert is_module_available("whisper"), (
34 "This function expects OpenAI Whisper to be installed. "
35 "You can install it via 'pip install git+https://github.com/openai/whisper.git' "
36 "(see https://github.com/openai/whisper for details)."
37 )
38
39 if isinstance(manifest, RecordingSet):
40 yield from _annotate_recordings(manifest, language, model_name, device)
41 elif isinstance(manifest, CutSet):
42 yield from _annotate_cuts(manifest, language, model_name, device)
43 else:
44 raise ValueError("The ``manifest`` must be either a RecordingSet or a CutSet.")
45
46
47 def _annotate_recordings(
48 recordings: RecordingSet, language: str, model_name: str, device: str
49 ):
50 """
51 Helper function that annotates a RecordingSet with Whisper.
52 """
53 import whisper
54
55 model = whisper.load_model(model_name, device=device)
56
57 for recording in recordings:
58 if recording.num_channels > 1:
59 logging.warning(
60 f"Skipping recording '{recording.id}'. It has {recording.num_channels} channels, "
61 f"but we currently only support mono input."
62 )
63 continue
64 audio = torch.from_numpy(recording.resample(16000).load_audio()).squeeze(0)
65 result = whisper.transcribe(model=model, audio=audio, language=language)
66 supervisions = [
67 SupervisionSegment(
68 id=f"{recording.id}-{segment['id']:06d}",
69 recording_id=recording.id,
70 start=round(segment["start"], ndigits=8),
71 duration=round(segment["end"], ndigits=8),
72 text=segment["text"].strip(),
73 language=result["language"],
74 )
75 for segment in result["segments"]
76 ]
77 cut = recording.to_cut()
78 if supervisions:
79 supervisions = _postprocess_timestamps(supervisions)
80 cut.supervisions = list(
81 trim_supervisions_to_recordings(
82 recordings=recording, supervisions=supervisions, verbose=False
83 )
84 )
85 yield cut
86
87
88 def _annotate_cuts(cuts: CutSet, language: str, model_name: str, device: str):
89 """
90 Helper function that annotates a CutSet with Whisper.
91 """
92 import whisper
93
94 model = whisper.load_model(model_name, device=device)
95
96 for cut in cuts:
97 if cut.num_channels > 1:
98 logging.warning(
99 f"Skipping cut '{cut.id}'. It has {cut.num_channels} channels, "
100 f"but we currently only support mono input."
101 )
102 continue
103 audio = torch.from_numpy(cut.resample(16000).load_audio()).squeeze(0)
104 result = whisper.transcribe(model=model, audio=audio, language=language)
105 supervisions = [
106 SupervisionSegment(
107 id=f"{cut.id}-{segment['id']:06d}",
108 recording_id=cut.recording_id,
109 start=round(segment["start"], ndigits=8),
110 duration=max(cut.duration, round(segment["end"], ndigits=8)),
111 text=segment["text"].strip(),
112 language=result["language"],
113 )
114 for segment in result["segments"]
115 ]
116 new_cut = fastcopy(cut, supervisions=_postprocess_timestamps(supervisions))
117 yield new_cut
118
119
120 def _postprocess_timestamps(supervisions: List[SupervisionSegment]):
121 """
122 Whisper tends to have a lot of overlapping segments due to inaccurate end timestamps.
123 Under a strong assumption that the input speech is non-overlapping, we can fix that
124 by always truncating to the start timestamp of the next segment.
125 """
126 from cytoolz import sliding_window
127
128 if len(supervisions) < 2:
129 return supervisions
130 out = []
131 for cur, nxt in sliding_window(2, supervisions):
132 if cur.end > nxt.start:
133 cur = cur.trim(end=nxt.start)
134 out.append(cur)
135 out.append(nxt)
136 return out
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lhotse/workflows/whisper.py b/lhotse/workflows/whisper.py
--- a/lhotse/workflows/whisper.py
+++ b/lhotse/workflows/whisper.py
@@ -3,7 +3,14 @@
import torch
-from lhotse import CutSet, MonoCut, Recording, RecordingSet, SupervisionSegment
+from lhotse import (
+ CutSet,
+ MonoCut,
+ Recording,
+ RecordingSet,
+ SupervisionSegment,
+ add_durations,
+)
from lhotse.qa import trim_supervisions_to_recordings
from lhotse.utils import fastcopy, is_module_available
@@ -68,7 +75,9 @@
id=f"{recording.id}-{segment['id']:06d}",
recording_id=recording.id,
start=round(segment["start"], ndigits=8),
- duration=round(segment["end"], ndigits=8),
+ duration=add_durations(
+ segment["end"], -segment["start"], sampling_rate=16000
+ ),
text=segment["text"].strip(),
language=result["language"],
)
@@ -107,7 +116,12 @@
id=f"{cut.id}-{segment['id']:06d}",
recording_id=cut.recording_id,
start=round(segment["start"], ndigits=8),
- duration=max(cut.duration, round(segment["end"], ndigits=8)),
+ duration=max(
+ cut.duration,
+ add_durations(
+ segment["end"], -segment["start"], sampling_rate=16000
+ ),
+ ),
text=segment["text"].strip(),
language=result["language"],
)
| {"golden_diff": "diff --git a/lhotse/workflows/whisper.py b/lhotse/workflows/whisper.py\n--- a/lhotse/workflows/whisper.py\n+++ b/lhotse/workflows/whisper.py\n@@ -3,7 +3,14 @@\n \n import torch\n \n-from lhotse import CutSet, MonoCut, Recording, RecordingSet, SupervisionSegment\n+from lhotse import (\n+ CutSet,\n+ MonoCut,\n+ Recording,\n+ RecordingSet,\n+ SupervisionSegment,\n+ add_durations,\n+)\n from lhotse.qa import trim_supervisions_to_recordings\n from lhotse.utils import fastcopy, is_module_available\n \n@@ -68,7 +75,9 @@\n id=f\"{recording.id}-{segment['id']:06d}\",\n recording_id=recording.id,\n start=round(segment[\"start\"], ndigits=8),\n- duration=round(segment[\"end\"], ndigits=8),\n+ duration=add_durations(\n+ segment[\"end\"], -segment[\"start\"], sampling_rate=16000\n+ ),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n@@ -107,7 +116,12 @@\n id=f\"{cut.id}-{segment['id']:06d}\",\n recording_id=cut.recording_id,\n start=round(segment[\"start\"], ndigits=8),\n- duration=max(cut.duration, round(segment[\"end\"], ndigits=8)),\n+ duration=max(\n+ cut.duration,\n+ add_durations(\n+ segment[\"end\"], -segment[\"start\"], sampling_rate=16000\n+ ),\n+ ),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n", "issue": "Whisper workflow supervision `end` may be incorrect\nSo I ran the model on one of the AMI headset recordings (~5000s) and it seems like the \"end\" actually shows the end of the segment, not the duration. Here is the JSON containing the `results[\"segments\"]`: https://drive.google.com/file/d/169igkcDY2SmMs5k3hOhHip89T4MQDnKs/view?usp=sharing\r\n\r\n_Originally posted by @desh2608 in https://github.com/lhotse-speech/lhotse/pull/834#discussion_r988376898_\r\n \n", "before_files": [{"content": "import logging\nfrom typing import Any, Generator, List, Optional, Union\n\nimport torch\n\nfrom lhotse import CutSet, MonoCut, Recording, RecordingSet, SupervisionSegment\nfrom lhotse.qa import trim_supervisions_to_recordings\nfrom lhotse.utils import fastcopy, is_module_available\n\n\ndef annotate_with_whisper(\n manifest: Union[RecordingSet, CutSet],\n language: Optional[str] = None,\n model_name: str = \"base\",\n device: str = \"cpu\",\n) -> Generator[MonoCut, None, None]:\n \"\"\"\n Use OpenAI Whisper model to annotate either RECORDINGS_MANIFEST, RECORDINGS_DIR, or CUTS_MANIFEST.\n It will perform automatic segmentation, transcription, and language identification. If\n the first argument is a CutSet, it will overwrite the supervisions with the results of the inference.\n\n Note: this is an experimental feature of Lhotse, and is not guaranteed to yield\n high quality of data.\n\n See the original repo for more details: https://github.com/openai/whisper\n\n :param manifest: a ``RecordingSet`` or ``CutSet`` object.\n :param language: specify the language if known upfront, otherwise it will be auto-detected.\n :param model_name: one of available Whisper variants (base, medium, large, etc.).\n :param device: Where to run the inference (cpu, cuda, etc.).\n :return: a generator of cuts (use ``CutSet.open_writer()`` to write them).\n \"\"\"\n assert is_module_available(\"whisper\"), (\n \"This function expects OpenAI Whisper to be installed. 
\"\n \"You can install it via 'pip install git+https://github.com/openai/whisper.git' \"\n \"(see https://github.com/openai/whisper for details).\"\n )\n\n if isinstance(manifest, RecordingSet):\n yield from _annotate_recordings(manifest, language, model_name, device)\n elif isinstance(manifest, CutSet):\n yield from _annotate_cuts(manifest, language, model_name, device)\n else:\n raise ValueError(\"The ``manifest`` must be either a RecordingSet or a CutSet.\")\n\n\ndef _annotate_recordings(\n recordings: RecordingSet, language: str, model_name: str, device: str\n):\n \"\"\"\n Helper function that annotates a RecordingSet with Whisper.\n \"\"\"\n import whisper\n\n model = whisper.load_model(model_name, device=device)\n\n for recording in recordings:\n if recording.num_channels > 1:\n logging.warning(\n f\"Skipping recording '{recording.id}'. It has {recording.num_channels} channels, \"\n f\"but we currently only support mono input.\"\n )\n continue\n audio = torch.from_numpy(recording.resample(16000).load_audio()).squeeze(0)\n result = whisper.transcribe(model=model, audio=audio, language=language)\n supervisions = [\n SupervisionSegment(\n id=f\"{recording.id}-{segment['id']:06d}\",\n recording_id=recording.id,\n start=round(segment[\"start\"], ndigits=8),\n duration=round(segment[\"end\"], ndigits=8),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n for segment in result[\"segments\"]\n ]\n cut = recording.to_cut()\n if supervisions:\n supervisions = _postprocess_timestamps(supervisions)\n cut.supervisions = list(\n trim_supervisions_to_recordings(\n recordings=recording, supervisions=supervisions, verbose=False\n )\n )\n yield cut\n\n\ndef _annotate_cuts(cuts: CutSet, language: str, model_name: str, device: str):\n \"\"\"\n Helper function that annotates a CutSet with Whisper.\n \"\"\"\n import whisper\n\n model = whisper.load_model(model_name, device=device)\n\n for cut in cuts:\n if cut.num_channels > 1:\n logging.warning(\n f\"Skipping cut '{cut.id}'. 
It has {cut.num_channels} channels, \"\n f\"but we currently only support mono input.\"\n )\n continue\n audio = torch.from_numpy(cut.resample(16000).load_audio()).squeeze(0)\n result = whisper.transcribe(model=model, audio=audio, language=language)\n supervisions = [\n SupervisionSegment(\n id=f\"{cut.id}-{segment['id']:06d}\",\n recording_id=cut.recording_id,\n start=round(segment[\"start\"], ndigits=8),\n duration=max(cut.duration, round(segment[\"end\"], ndigits=8)),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n for segment in result[\"segments\"]\n ]\n new_cut = fastcopy(cut, supervisions=_postprocess_timestamps(supervisions))\n yield new_cut\n\n\ndef _postprocess_timestamps(supervisions: List[SupervisionSegment]):\n \"\"\"\n Whisper tends to have a lot of overlapping segments due to inaccurate end timestamps.\n Under a strong assumption that the input speech is non-overlapping, we can fix that\n by always truncating to the start timestamp of the next segment.\n \"\"\"\n from cytoolz import sliding_window\n\n if len(supervisions) < 2:\n return supervisions\n out = []\n for cur, nxt in sliding_window(2, supervisions):\n if cur.end > nxt.start:\n cur = cur.trim(end=nxt.start)\n out.append(cur)\n out.append(nxt)\n return out\n", "path": "lhotse/workflows/whisper.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Generator, List, Optional, Union\n\nimport torch\n\nfrom lhotse import (\n CutSet,\n MonoCut,\n Recording,\n RecordingSet,\n SupervisionSegment,\n add_durations,\n)\nfrom lhotse.qa import trim_supervisions_to_recordings\nfrom lhotse.utils import fastcopy, is_module_available\n\n\ndef annotate_with_whisper(\n manifest: Union[RecordingSet, CutSet],\n language: Optional[str] = None,\n model_name: str = \"base\",\n device: str = \"cpu\",\n) -> Generator[MonoCut, None, None]:\n \"\"\"\n Use OpenAI Whisper model to annotate either RECORDINGS_MANIFEST, RECORDINGS_DIR, or CUTS_MANIFEST.\n It will perform automatic segmentation, transcription, and language identification. If\n the first argument is a CutSet, it will overwrite the supervisions with the results of the inference.\n\n Note: this is an experimental feature of Lhotse, and is not guaranteed to yield\n high quality of data.\n\n See the original repo for more details: https://github.com/openai/whisper\n\n :param manifest: a ``RecordingSet`` or ``CutSet`` object.\n :param language: specify the language if known upfront, otherwise it will be auto-detected.\n :param model_name: one of available Whisper variants (base, medium, large, etc.).\n :param device: Where to run the inference (cpu, cuda, etc.).\n :return: a generator of cuts (use ``CutSet.open_writer()`` to write them).\n \"\"\"\n assert is_module_available(\"whisper\"), (\n \"This function expects OpenAI Whisper to be installed. 
\"\n \"You can install it via 'pip install git+https://github.com/openai/whisper.git' \"\n \"(see https://github.com/openai/whisper for details).\"\n )\n\n if isinstance(manifest, RecordingSet):\n yield from _annotate_recordings(manifest, language, model_name, device)\n elif isinstance(manifest, CutSet):\n yield from _annotate_cuts(manifest, language, model_name, device)\n else:\n raise ValueError(\"The ``manifest`` must be either a RecordingSet or a CutSet.\")\n\n\ndef _annotate_recordings(\n recordings: RecordingSet, language: str, model_name: str, device: str\n):\n \"\"\"\n Helper function that annotates a RecordingSet with Whisper.\n \"\"\"\n import whisper\n\n model = whisper.load_model(model_name, device=device)\n\n for recording in recordings:\n if recording.num_channels > 1:\n logging.warning(\n f\"Skipping recording '{recording.id}'. It has {recording.num_channels} channels, \"\n f\"but we currently only support mono input.\"\n )\n continue\n audio = torch.from_numpy(recording.resample(16000).load_audio()).squeeze(0)\n result = whisper.transcribe(model=model, audio=audio, language=language)\n supervisions = [\n SupervisionSegment(\n id=f\"{recording.id}-{segment['id']:06d}\",\n recording_id=recording.id,\n start=round(segment[\"start\"], ndigits=8),\n duration=add_durations(\n segment[\"end\"], -segment[\"start\"], sampling_rate=16000\n ),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n for segment in result[\"segments\"]\n ]\n cut = recording.to_cut()\n if supervisions:\n supervisions = _postprocess_timestamps(supervisions)\n cut.supervisions = list(\n trim_supervisions_to_recordings(\n recordings=recording, supervisions=supervisions, verbose=False\n )\n )\n yield cut\n\n\ndef _annotate_cuts(cuts: CutSet, language: str, model_name: str, device: str):\n \"\"\"\n Helper function that annotates a CutSet with Whisper.\n \"\"\"\n import whisper\n\n model = whisper.load_model(model_name, device=device)\n\n for cut in cuts:\n if cut.num_channels > 1:\n logging.warning(\n f\"Skipping cut '{cut.id}'. It has {cut.num_channels} channels, \"\n f\"but we currently only support mono input.\"\n )\n continue\n audio = torch.from_numpy(cut.resample(16000).load_audio()).squeeze(0)\n result = whisper.transcribe(model=model, audio=audio, language=language)\n supervisions = [\n SupervisionSegment(\n id=f\"{cut.id}-{segment['id']:06d}\",\n recording_id=cut.recording_id,\n start=round(segment[\"start\"], ndigits=8),\n duration=max(\n cut.duration,\n add_durations(\n segment[\"end\"], -segment[\"start\"], sampling_rate=16000\n ),\n ),\n text=segment[\"text\"].strip(),\n language=result[\"language\"],\n )\n for segment in result[\"segments\"]\n ]\n new_cut = fastcopy(cut, supervisions=_postprocess_timestamps(supervisions))\n yield new_cut\n\n\ndef _postprocess_timestamps(supervisions: List[SupervisionSegment]):\n \"\"\"\n Whisper tends to have a lot of overlapping segments due to inaccurate end timestamps.\n Under a strong assumption that the input speech is non-overlapping, we can fix that\n by always truncating to the start timestamp of the next segment.\n \"\"\"\n from cytoolz import sliding_window\n\n if len(supervisions) < 2:\n return supervisions\n out = []\n for cur, nxt in sliding_window(2, supervisions):\n if cur.end > nxt.start:\n cur = cur.trim(end=nxt.start)\n out.append(cur)\n out.append(nxt)\n return out\n", "path": "lhotse/workflows/whisper.py"}]} | 1,921 | 389 |