problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
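The rows below are flattened records from this patch-generation dataset, one per upstream GitHub issue. As a quick illustration of how such a dataset can be loaded and inspected, here is a minimal sketch using the `datasets` library; the repo id `rasdani/github-patches` is taken from the `source` column below and is an assumption about where the data is published, as are the split name and the example column accesses.

```python
# Minimal sketch for loading and inspecting this dataset.
# Assumptions: the data is published on the Hugging Face Hub under the repo id
# shown in the `source` column below ("rasdani/github-patches") and exposes a
# "train" split; adjust both if the actual hosting differs.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])                      # e.g. "gh_patches_debug_13438"
print(row["in_source_id"])                    # upstream repo/issue identifier
print(row["num_tokens"], row["num_tokens_diff"])
print(row["prompt"][:500])                    # issue text plus relevant file contents
print(row["golden_diff"])                     # reference patch in git diff format
```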
gh_patches_debug_13438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3307 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider vetco is broken
During the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/vetco_clinic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from scrapy.selector import Selector
9
10
11 class VetcoSpider(scrapy.Spider):
12 name = "vetco"
13 item_attributes = {'brand': "vetcoclinics"}
14 allowed_domains = ["vetcoclinics.com"]
15 start_urls = (
16 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
17 )
18
19 def start_requests(self):
20 with open('./locations/searchable_points/us_zcta.csv') as points:
21 next(points) # Ignore the header
22 for point in points:
23 row = point.split(',')
24 zip = row[0].strip().strip('"')
25
26 url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
27
28 yield scrapy.http.Request(
29 url,
30 self.parse,
31 method='GET'
32 )
33
34 def parse(self, response):
35 jsonresponse = json.loads(response.body_as_unicode())
36 if jsonresponse is not None:
37 clinics = jsonresponse.get('clinics')
38 if clinics:
39 for stores in clinics:
40 body = stores['label']
41 address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
42 if len(address) == 3:
43 addr_full, city_state_postal, phone = [item.split(",") for item in address]
44 city, state_postal = [item.split(",") for item in city_state_postal]
45 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
46
47
48 else:
49 addr_full, city_state_postal = [item.split(",") for item in address]
50 city, state_postal = [item.split(",") for item in city_state_postal]
51 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
52
53 properties = {
54 'ref': addr_full[0].strip(),
55 'addr_full': addr_full[0].strip(),
56 'city': city[0].strip(),
57 'state': state,
58 'postcode': postal,
59 'lat': float(stores["point"]["lat"]),
60 'lon': float(stores["point"]["long"]),
61 'website': response.url
62 }
63
64 yield GeojsonPointItem(**properties)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py
--- a/locations/spiders/vetco_clinic.py
+++ b/locations/spiders/vetco_clinic.py
@@ -38,7 +38,7 @@
if clinics:
for stores in clinics:
body = stores['label']
- address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
+ address = Selector(text=body).xpath('//address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
| {"golden_diff": "diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py\n--- a/locations/spiders/vetco_clinic.py\n+++ b/locations/spiders/vetco_clinic.py\n@@ -38,7 +38,7 @@\n if clinics:\n for stores in clinics:\n body = stores['label']\n- address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n+ address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n", "issue": "Spider vetco is broken\nDuring the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with 
open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}]} | 1,108 | 173 |
gh_patches_debug_8249 | rasdani/github-patches | git_diff | holoviz__panel-2611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change fast theme button color back to white
After upgrading to Panel 0.12.0 the fast theme button switched to the red color

I would propose switching it back to white because 1) It looks better 2) receives less attention 3) makes changing the style of the template easier.
With the white color it looks like

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/template/fast/theme.py`
Content:
```
1 """
2 Functionality for styling according to Fast.design
3 """
4 import pathlib
5 import param
6
7 from bokeh.themes import Theme as _BkTheme
8
9 from ..theme import DarkTheme, DefaultTheme
10
11 _ROOT = pathlib.Path(__file__).parent / "css"
12
13 COLLAPSED_SVG_ICON = """
14 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
15 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
16 <path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
17 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
18 </svg>
19 """ # noqa
20
21 EXPANDED_SVG_ICON = """
22 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
23 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
24 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
25 </svg>
26 """ # noqa
27
28 FONT_URL = "//fonts.googleapis.com/css?family=Open+Sans"
29
30 class FastStyle(param.Parameterized):
31 """
32 The FastStyle class provides the different colors and icons used
33 to style the Fast Templates.
34 """
35
36 background_color = param.String(default="#ffffff")
37 neutral_color = param.String(default="#000000")
38 accent_base_color = param.String(default="#A01346")
39 collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)
40 expanded_icon = param.String(default=EXPANDED_SVG_ICON)
41 color = param.String(default="#00aa41")
42 neutral_fill_card_rest = param.String(default="#F7F7F7")
43 neutral_focus = param.String(default="#888888")
44 neutral_foreground_rest = param.String(default="#2B2B2B")
45
46 header_background = param.String(default="#00aa41")
47 header_neutral_color = param.String(default="#ffffff")
48 header_accent_base_color = param.String(default="#A01346")
49 header_color = param.String(default="#ffffff")
50 font = param.String(default="Open Sans, sans-serif")
51 font_url = param.String(default=FONT_URL)
52 corner_radius = param.Integer(default=3)
53 shadow = param.Boolean(default=True)
54
55 def create_bokeh_theme(self):
56 """Returns a custom bokeh theme based on the style parameters
57
58 Returns:
59 Dict: A Bokeh Theme
60 """
61
62 return {
63 "attrs": {
64 "Figure": {
65 "background_fill_color": self.background_color,
66 "border_fill_color": self.neutral_fill_card_rest,
67 "border_fill_alpha": 0,
68 "outline_line_color": self.neutral_focus,
69 "outline_line_alpha": 0.5,
70 "outline_line_width": 1,
71 },
72 "Grid": {"grid_line_color": self.neutral_focus, "grid_line_alpha": 0.25},
73 "Axis": {
74 "major_tick_line_alpha": 0.5,
75 "major_tick_line_color": self.neutral_foreground_rest,
76 "minor_tick_line_alpha": 0.25,
77 "minor_tick_line_color": self.neutral_foreground_rest,
78 "axis_line_alpha": 0.1,
79 "axis_line_color": self.neutral_foreground_rest,
80 "major_label_text_color": self.neutral_foreground_rest,
81 "major_label_text_font": self.font,
82 # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed
83 # "major_label_text_font_size": "1.025em",
84 "axis_label_standoff": 10,
85 "axis_label_text_color": self.neutral_foreground_rest,
86 "axis_label_text_font": self.font,
87 "axis_label_text_font_size": "1.25em",
88 "axis_label_text_font_style": "normal",
89 },
90 "Legend": {
91 "spacing": 8,
92 "glyph_width": 15,
93 "label_standoff": 8,
94 "label_text_color": self.neutral_foreground_rest,
95 "label_text_font": self.font,
96 "label_text_font_size": "1.025em",
97 "border_line_alpha": 0.5,
98 "border_line_color": self.neutral_focus,
99 "background_fill_alpha": 0.25,
100 "background_fill_color": self.neutral_fill_card_rest,
101 },
102 "ColorBar": {
103 "title_text_color": self.neutral_foreground_rest,
104 "title_text_font": self.font,
105 "title_text_font_size": "1.025em",
106 "title_text_font_style": "normal",
107 "major_label_text_color": self.neutral_foreground_rest,
108 "major_label_text_font": self.font,
109 "major_label_text_font_size": "1.025em",
110 # "background_fill_color": FAST_DARK_75,
111 "major_tick_line_alpha": 0,
112 "bar_line_alpha": 0,
113 },
114 "Title": {
115 "text_color": self.neutral_foreground_rest,
116 "text_font": self.font,
117 "text_font_size": "1.15em",
118 },
119 }
120 }
121
122
123 DEFAULT_STYLE = FastStyle()
124 DARK_STYLE = FastStyle(
125 accent_base_color="#E1477E",
126 background_color="#181818",
127 color="#ffffff",
128 header_background="#313131",
129 header_color="#ffffff",
130 neutral_fill_card_rest="#212121",
131 neutral_focus="#717171",
132 neutral_foreground_rest="#e5e5e5",
133 shadow = False,
134 )
135
136 class FastDefaultTheme(DefaultTheme):
137
138 base_css = param.Filename(default=_ROOT / 'fast_root_default.css')
139
140 style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)
141
142 __abstract = True
143
144 @property
145 def bokeh_theme(self):
146 return _BkTheme(json=self.style.create_bokeh_theme())
147
148
149 class FastDarkTheme(DarkTheme):
150
151 base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')
152
153 style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)
154
155 __abstract = True
156
157 @property
158 def bokeh_theme(self):
159 return _BkTheme(json=self.style.create_bokeh_theme())
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py
--- a/panel/template/fast/theme.py
+++ b/panel/template/fast/theme.py
@@ -45,7 +45,7 @@
header_background = param.String(default="#00aa41")
header_neutral_color = param.String(default="#ffffff")
- header_accent_base_color = param.String(default="#A01346")
+ header_accent_base_color = param.String(default="#ffffff")
header_color = param.String(default="#ffffff")
font = param.String(default="Open Sans, sans-serif")
font_url = param.String(default=FONT_URL)
| {"golden_diff": "diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py\n--- a/panel/template/fast/theme.py\n+++ b/panel/template/fast/theme.py\n@@ -45,7 +45,7 @@\n \n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n- header_accent_base_color = param.String(default=\"#A01346\")\n+ header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n", "issue": "Change fast theme button color back to white\nAfter upgrading to Panel 0.12.0 the fast theme button switched to the red color\r\n\r\n\r\n\r\nI would propose switching it back to white because 1) It looks better 2) receives less attention 3) makes changing the style of the template easier.\r\n\r\nWith the white color it looks like\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#A01346\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = 
param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n \"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}], "after_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg 
style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n 
\"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}]} | 2,704 | 142 |
gh_patches_debug_40056 | rasdani/github-patches | git_diff | arviz-devs__arviz-1074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bokeh hover tooltip
We need to implement better default hover settings.
https://docs.bokeh.org/en/1.4.0/docs/reference/models/tools.html#bokeh.models.tools.HoverTool
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/backends/bokeh/pairplot.py`
Content:
```
1 """Bokeh pairplot."""
2 import warnings
3 from uuid import uuid4
4
5 import bokeh.plotting as bkp
6 from bokeh.models import ColumnDataSource, CDSView, GroupFilter
7 import numpy as np
8
9 from . import backend_kwarg_defaults
10 from .. import show_layout
11 from ...kdeplot import plot_kde
12 from ...plot_utils import _scale_fig_size
13 from ....rcparams import rcParams
14
15
16 def plot_pair(
17 ax,
18 infdata_group,
19 numvars,
20 figsize,
21 textsize,
22 kind,
23 plot_kwargs,
24 contour,
25 fill_last,
26 divergences,
27 diverging_mask,
28 flat_var_names,
29 backend_kwargs,
30 show,
31 ):
32 """Bokeh pair plot."""
33 if backend_kwargs is None:
34 backend_kwargs = {}
35
36 backend_kwargs = {
37 **backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
38 **backend_kwargs,
39 }
40 dpi = backend_kwargs.pop("dpi")
41 if numvars == 2:
42 (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)
43
44 source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
45
46 if divergences:
47 divergenve_name = "divergences_{}".format(str(uuid4()))
48 source_dict[divergenve_name] = (
49 np.array(diverging_mask).astype(bool).astype(int).astype(str)
50 )
51
52 source = ColumnDataSource(data=source_dict)
53
54 if divergences:
55 source_nondiv = CDSView(
56 source=source, filters=[GroupFilter(column_name=divergenve_name, group="0")]
57 )
58 source_div = CDSView(
59 source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
60 )
61
62 if ax is None:
63 backend_kwargs["width"] = int(figsize[0] * dpi)
64 backend_kwargs["height"] = int(figsize[1] * dpi)
65 ax = bkp.figure(**backend_kwargs)
66
67 if kind == "scatter":
68 if divergences:
69 ax.circle(
70 flat_var_names[0],
71 flat_var_names[1],
72 source=source,
73 view=source_nondiv,
74 legend_label="non-divergent",
75 )
76 else:
77 ax.circle(flat_var_names[0], flat_var_names[1], source=source)
78 elif kind == "kde":
79 plot_kde(
80 infdata_group[0],
81 infdata_group[1],
82 contour=contour,
83 fill_last=fill_last,
84 ax=ax,
85 backend="bokeh",
86 backend_kwargs={},
87 show=False,
88 )
89 else:
90 ax.hexbin(infdata_group[0], infdata_group[1], size=0.5)
91 ax.grid.visible = False
92
93 if divergences:
94 ax.circle(
95 flat_var_names[0],
96 flat_var_names[1],
97 line_color="black",
98 fill_color="orange",
99 line_width=1,
100 size=6,
101 source=source,
102 view=source_div,
103 legend_label="divergent",
104 )
105 ax.legend.click_policy = "hide"
106
107 ax.xaxis.axis_label = flat_var_names[0]
108 ax.yaxis.axis_label = flat_var_names[1]
109
110 show_layout(ax, show)
111
112 else:
113 max_plots = (
114 numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
115 )
116 vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
117 if vars_to_plot < numvars:
118 warnings.warn(
119 "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
120 "of resulting pair plots with these variables, generating only a "
121 "{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
122 UserWarning,
123 )
124 numvars = vars_to_plot
125
126 (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)
127
128 if ax is None:
129 ax = []
130 backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
131 backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
132 for row in range(numvars - 1):
133 row_ax = []
134 for col in range(numvars - 1):
135 if row < col:
136 row_ax.append(None)
137 else:
138 ax_ = bkp.figure(**backend_kwargs)
139 row_ax.append(ax_)
140 ax.append(row_ax)
141 ax = np.array(ax)
142
143 tmp_flat_var_names = None
144 if len(flat_var_names) == len(list(set(flat_var_names))):
145 source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
146 else:
147 tmp_flat_var_names = ["{}__{}".format(name, str(uuid4())) for name in flat_var_names]
148 source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group]))
149 if divergences:
150 divergenve_name = "divergences_{}".format(str(uuid4()))
151 source_dict[divergenve_name] = (
152 np.array(diverging_mask).astype(bool).astype(int).astype(str)
153 )
154
155 source = ColumnDataSource(data=source_dict)
156
157 if divergences:
158 source_nondiv = CDSView(
159 source=source, filters=[GroupFilter(column_name=divergenve_name, group="0")]
160 )
161 source_div = CDSView(
162 source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
163 )
164
165 for i in range(0, numvars - 1):
166 var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]
167
168 for j in range(0, numvars - 1):
169 if j < i:
170 continue
171
172 var2 = (
173 flat_var_names[j + 1]
174 if tmp_flat_var_names is None
175 else tmp_flat_var_names[j + 1]
176 )
177
178 if kind == "scatter":
179 if divergences:
180 ax[j, i].circle(var1, var2, source=source, view=source_nondiv)
181 else:
182 ax[j, i].circle(var1, var2, source=source)
183
184 elif kind == "kde":
185 var1_kde = infdata_group[i]
186 var2_kde = infdata_group[j + 1]
187 plot_kde(
188 var1_kde,
189 var2_kde,
190 contour=contour,
191 fill_last=fill_last,
192 ax=ax[j, i],
193 backend="bokeh",
194 backend_kwargs={},
195 show=False,
196 **plot_kwargs
197 )
198
199 else:
200 var1_hexbin = infdata_group[i]
201 var2_hexbin = infdata_group[j + 1]
202 ax[j, i].grid.visible = False
203 ax[j, i].hexbin(var1_hexbin, var2_hexbin, size=0.5)
204
205 if divergences:
206 ax[j, i].circle(
207 var1,
208 var2,
209 line_color="black",
210 fill_color="orange",
211 line_width=1,
212 size=10,
213 source=source,
214 view=source_div,
215 )
216
217 ax[j, i].xaxis.axis_label = flat_var_names[i]
218 ax[j, i].yaxis.axis_label = flat_var_names[j + 1]
219
220 show_layout(ax, show)
221
222 return ax
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/backends/bokeh/pairplot.py b/arviz/plots/backends/bokeh/pairplot.py
--- a/arviz/plots/backends/bokeh/pairplot.py
+++ b/arviz/plots/backends/bokeh/pairplot.py
@@ -39,6 +39,13 @@
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
+ if kind == "scatter":
+ tooltips = [
+ (flat_var_names[1], "@{{{}}}".format(flat_var_names[1])),
+ (flat_var_names[0], "@{{{}}}".format(flat_var_names[0])),
+ ]
+ backend_kwargs.setdefault("tooltips", tooltips)
+
(figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)
source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
@@ -125,21 +132,6 @@
(figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)
- if ax is None:
- ax = []
- backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
- backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
- for row in range(numvars - 1):
- row_ax = []
- for col in range(numvars - 1):
- if row < col:
- row_ax.append(None)
- else:
- ax_ = bkp.figure(**backend_kwargs)
- row_ax.append(ax_)
- ax.append(row_ax)
- ax = np.array(ax)
-
tmp_flat_var_names = None
if len(flat_var_names) == len(list(set(flat_var_names))):
source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))
@@ -162,6 +154,40 @@
source=source, filters=[GroupFilter(column_name=divergenve_name, group="1")]
)
+ if ax is None:
+ ax = []
+ backend_kwargs.setdefault("width", int(figsize[0] / (numvars - 1) * dpi))
+ backend_kwargs.setdefault("height", int(figsize[1] / (numvars - 1) * dpi))
+ for row in range(numvars - 1):
+ row_ax = []
+ var2 = (
+ flat_var_names[row + 1]
+ if tmp_flat_var_names is None
+ else tmp_flat_var_names[row + 1]
+ )
+ for col in range(numvars - 1):
+ if row < col:
+ row_ax.append(None)
+ continue
+
+ var1 = (
+ flat_var_names[col]
+ if tmp_flat_var_names is None
+ else tmp_flat_var_names[col]
+ )
+ backend_kwargs_copy = backend_kwargs.copy()
+ if kind == "scatter":
+ tooltips = [
+ (var2, "@{{{}}}".format(var2)),
+ (var1, "@{{{}}}".format(var1)),
+ ]
+ backend_kwargs_copy.setdefault("tooltips", tooltips)
+
+ ax_ = bkp.figure(**backend_kwargs_copy)
+ row_ax.append(ax_)
+ ax.append(row_ax)
+ ax = np.array(ax)
+
for i in range(0, numvars - 1):
var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]
| {"golden_diff": "diff --git a/arviz/plots/backends/bokeh/pairplot.py b/arviz/plots/backends/bokeh/pairplot.py\n--- a/arviz/plots/backends/bokeh/pairplot.py\n+++ b/arviz/plots/backends/bokeh/pairplot.py\n@@ -39,6 +39,13 @@\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n+ if kind == \"scatter\":\n+ tooltips = [\n+ (flat_var_names[1], \"@{{{}}}\".format(flat_var_names[1])),\n+ (flat_var_names[0], \"@{{{}}}\".format(flat_var_names[0])),\n+ ]\n+ backend_kwargs.setdefault(\"tooltips\", tooltips)\n+\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)\n \n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n@@ -125,21 +132,6 @@\n \n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)\n \n- if ax is None:\n- ax = []\n- backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n- backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n- for row in range(numvars - 1):\n- row_ax = []\n- for col in range(numvars - 1):\n- if row < col:\n- row_ax.append(None)\n- else:\n- ax_ = bkp.figure(**backend_kwargs)\n- row_ax.append(ax_)\n- ax.append(row_ax)\n- ax = np.array(ax)\n-\n tmp_flat_var_names = None\n if len(flat_var_names) == len(list(set(flat_var_names))):\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n@@ -162,6 +154,40 @@\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n \n+ if ax is None:\n+ ax = []\n+ backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n+ backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n+ for row in range(numvars - 1):\n+ row_ax = []\n+ var2 = (\n+ flat_var_names[row + 1]\n+ if tmp_flat_var_names is None\n+ else tmp_flat_var_names[row + 1]\n+ )\n+ for col in range(numvars - 1):\n+ if row < col:\n+ row_ax.append(None)\n+ continue\n+\n+ var1 = (\n+ flat_var_names[col]\n+ if tmp_flat_var_names is None\n+ else tmp_flat_var_names[col]\n+ )\n+ backend_kwargs_copy = backend_kwargs.copy()\n+ if kind == \"scatter\":\n+ tooltips = [\n+ (var2, \"@{{{}}}\".format(var2)),\n+ (var1, \"@{{{}}}\".format(var1)),\n+ ]\n+ backend_kwargs_copy.setdefault(\"tooltips\", tooltips)\n+\n+ ax_ = bkp.figure(**backend_kwargs_copy)\n+ row_ax.append(ax_)\n+ ax.append(row_ax)\n+ ax = np.array(ax)\n+\n for i in range(0, numvars - 1):\n var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]\n", "issue": "Bokeh hover tooltip\nWe need to implement better default hover settings.\r\n\r\nhttps://docs.bokeh.org/en/1.4.0/docs/reference/models/tools.html#bokeh.models.tools.HoverTool\n", "before_files": [{"content": "\"\"\"Bokeh pairplot.\"\"\"\nimport warnings\nfrom uuid import uuid4\n\nimport bokeh.plotting as bkp\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter\nimport numpy as np\n\nfrom . import backend_kwarg_defaults\nfrom .. 
import show_layout\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import _scale_fig_size\nfrom ....rcparams import rcParams\n\n\ndef plot_pair(\n ax,\n infdata_group,\n numvars,\n figsize,\n textsize,\n kind,\n plot_kwargs,\n contour,\n fill_last,\n divergences,\n diverging_mask,\n flat_var_names,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh pair plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults((\"dpi\", \"plot.bokeh.figure.dpi\"),),\n **backend_kwargs,\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)\n\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n if ax is None:\n backend_kwargs[\"width\"] = int(figsize[0] * dpi)\n backend_kwargs[\"height\"] = int(figsize[1] * dpi)\n ax = bkp.figure(**backend_kwargs)\n\n if kind == \"scatter\":\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n source=source,\n view=source_nondiv,\n legend_label=\"non-divergent\",\n )\n else:\n ax.circle(flat_var_names[0], flat_var_names[1], source=source)\n elif kind == \"kde\":\n plot_kde(\n infdata_group[0],\n infdata_group[1],\n contour=contour,\n fill_last=fill_last,\n ax=ax,\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n )\n else:\n ax.hexbin(infdata_group[0], infdata_group[1], size=0.5)\n ax.grid.visible = False\n\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=6,\n source=source,\n view=source_div,\n legend_label=\"divergent\",\n )\n ax.legend.click_policy = \"hide\"\n\n ax.xaxis.axis_label = flat_var_names[0]\n ax.yaxis.axis_label = flat_var_names[1]\n\n show_layout(ax, show)\n\n else:\n max_plots = (\n numvars ** 2 if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)\n if vars_to_plot < numvars:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of resulting pair plots with these variables, generating only a \"\n \"{side}x{side} grid\".format(max_plots=max_plots, side=vars_to_plot),\n UserWarning,\n )\n numvars = vars_to_plot\n\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)\n\n if ax is None:\n ax = []\n backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n for row in range(numvars - 1):\n row_ax = []\n for col in range(numvars - 1):\n if row < col:\n row_ax.append(None)\n else:\n ax_ = bkp.figure(**backend_kwargs)\n row_ax.append(ax_)\n ax.append(row_ax)\n ax = np.array(ax)\n\n tmp_flat_var_names = None\n if len(flat_var_names) == len(list(set(flat_var_names))):\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n else:\n tmp_flat_var_names = [\"{}__{}\".format(name, str(uuid4())) for name in 
flat_var_names]\n source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group]))\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n for i in range(0, numvars - 1):\n var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n continue\n\n var2 = (\n flat_var_names[j + 1]\n if tmp_flat_var_names is None\n else tmp_flat_var_names[j + 1]\n )\n\n if kind == \"scatter\":\n if divergences:\n ax[j, i].circle(var1, var2, source=source, view=source_nondiv)\n else:\n ax[j, i].circle(var1, var2, source=source)\n\n elif kind == \"kde\":\n var1_kde = infdata_group[i]\n var2_kde = infdata_group[j + 1]\n plot_kde(\n var1_kde,\n var2_kde,\n contour=contour,\n fill_last=fill_last,\n ax=ax[j, i],\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n **plot_kwargs\n )\n\n else:\n var1_hexbin = infdata_group[i]\n var2_hexbin = infdata_group[j + 1]\n ax[j, i].grid.visible = False\n ax[j, i].hexbin(var1_hexbin, var2_hexbin, size=0.5)\n\n if divergences:\n ax[j, i].circle(\n var1,\n var2,\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=10,\n source=source,\n view=source_div,\n )\n\n ax[j, i].xaxis.axis_label = flat_var_names[i]\n ax[j, i].yaxis.axis_label = flat_var_names[j + 1]\n\n show_layout(ax, show)\n\n return ax\n", "path": "arviz/plots/backends/bokeh/pairplot.py"}], "after_files": [{"content": "\"\"\"Bokeh pairplot.\"\"\"\nimport warnings\nfrom uuid import uuid4\n\nimport bokeh.plotting as bkp\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter\nimport numpy as np\n\nfrom . import backend_kwarg_defaults\nfrom .. 
import show_layout\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import _scale_fig_size\nfrom ....rcparams import rcParams\n\n\ndef plot_pair(\n ax,\n infdata_group,\n numvars,\n figsize,\n textsize,\n kind,\n plot_kwargs,\n contour,\n fill_last,\n divergences,\n diverging_mask,\n flat_var_names,\n backend_kwargs,\n show,\n):\n \"\"\"Bokeh pair plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults((\"dpi\", \"plot.bokeh.figure.dpi\"),),\n **backend_kwargs,\n }\n dpi = backend_kwargs.pop(\"dpi\")\n if numvars == 2:\n if kind == \"scatter\":\n tooltips = [\n (flat_var_names[1], \"@{{{}}}\".format(flat_var_names[1])),\n (flat_var_names[0], \"@{{{}}}\".format(flat_var_names[0])),\n ]\n backend_kwargs.setdefault(\"tooltips\", tooltips)\n\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 1, numvars - 1)\n\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n if ax is None:\n backend_kwargs[\"width\"] = int(figsize[0] * dpi)\n backend_kwargs[\"height\"] = int(figsize[1] * dpi)\n ax = bkp.figure(**backend_kwargs)\n\n if kind == \"scatter\":\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n source=source,\n view=source_nondiv,\n legend_label=\"non-divergent\",\n )\n else:\n ax.circle(flat_var_names[0], flat_var_names[1], source=source)\n elif kind == \"kde\":\n plot_kde(\n infdata_group[0],\n infdata_group[1],\n contour=contour,\n fill_last=fill_last,\n ax=ax,\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n )\n else:\n ax.hexbin(infdata_group[0], infdata_group[1], size=0.5)\n ax.grid.visible = False\n\n if divergences:\n ax.circle(\n flat_var_names[0],\n flat_var_names[1],\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=6,\n source=source,\n view=source_div,\n legend_label=\"divergent\",\n )\n ax.legend.click_policy = \"hide\"\n\n ax.xaxis.axis_label = flat_var_names[0]\n ax.yaxis.axis_label = flat_var_names[1]\n\n show_layout(ax, show)\n\n else:\n max_plots = (\n numvars ** 2 if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)\n if vars_to_plot < numvars:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of resulting pair plots with these variables, generating only a \"\n \"{side}x{side} grid\".format(max_plots=max_plots, side=vars_to_plot),\n UserWarning,\n )\n numvars = vars_to_plot\n\n (figsize, _, _, _, _, _) = _scale_fig_size(figsize, textsize, numvars - 2, numvars - 2)\n\n tmp_flat_var_names = None\n if len(flat_var_names) == len(list(set(flat_var_names))):\n source_dict = dict(zip(flat_var_names, [list(post) for post in infdata_group]))\n else:\n tmp_flat_var_names = [\"{}__{}\".format(name, str(uuid4())) for name in flat_var_names]\n source_dict = dict(zip(tmp_flat_var_names, [list(post) for post in infdata_group]))\n if divergences:\n divergenve_name = \"divergences_{}\".format(str(uuid4()))\n 
source_dict[divergenve_name] = (\n np.array(diverging_mask).astype(bool).astype(int).astype(str)\n )\n\n source = ColumnDataSource(data=source_dict)\n\n if divergences:\n source_nondiv = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"0\")]\n )\n source_div = CDSView(\n source=source, filters=[GroupFilter(column_name=divergenve_name, group=\"1\")]\n )\n\n if ax is None:\n ax = []\n backend_kwargs.setdefault(\"width\", int(figsize[0] / (numvars - 1) * dpi))\n backend_kwargs.setdefault(\"height\", int(figsize[1] / (numvars - 1) * dpi))\n for row in range(numvars - 1):\n row_ax = []\n var2 = (\n flat_var_names[row + 1]\n if tmp_flat_var_names is None\n else tmp_flat_var_names[row + 1]\n )\n for col in range(numvars - 1):\n if row < col:\n row_ax.append(None)\n continue\n\n var1 = (\n flat_var_names[col]\n if tmp_flat_var_names is None\n else tmp_flat_var_names[col]\n )\n backend_kwargs_copy = backend_kwargs.copy()\n if kind == \"scatter\":\n tooltips = [\n (var2, \"@{{{}}}\".format(var2)),\n (var1, \"@{{{}}}\".format(var1)),\n ]\n backend_kwargs_copy.setdefault(\"tooltips\", tooltips)\n\n ax_ = bkp.figure(**backend_kwargs_copy)\n row_ax.append(ax_)\n ax.append(row_ax)\n ax = np.array(ax)\n\n for i in range(0, numvars - 1):\n var1 = flat_var_names[i] if tmp_flat_var_names is None else tmp_flat_var_names[i]\n\n for j in range(0, numvars - 1):\n if j < i:\n continue\n\n var2 = (\n flat_var_names[j + 1]\n if tmp_flat_var_names is None\n else tmp_flat_var_names[j + 1]\n )\n\n if kind == \"scatter\":\n if divergences:\n ax[j, i].circle(var1, var2, source=source, view=source_nondiv)\n else:\n ax[j, i].circle(var1, var2, source=source)\n\n elif kind == \"kde\":\n var1_kde = infdata_group[i]\n var2_kde = infdata_group[j + 1]\n plot_kde(\n var1_kde,\n var2_kde,\n contour=contour,\n fill_last=fill_last,\n ax=ax[j, i],\n backend=\"bokeh\",\n backend_kwargs={},\n show=False,\n **plot_kwargs\n )\n\n else:\n var1_hexbin = infdata_group[i]\n var2_hexbin = infdata_group[j + 1]\n ax[j, i].grid.visible = False\n ax[j, i].hexbin(var1_hexbin, var2_hexbin, size=0.5)\n\n if divergences:\n ax[j, i].circle(\n var1,\n var2,\n line_color=\"black\",\n fill_color=\"orange\",\n line_width=1,\n size=10,\n source=source,\n view=source_div,\n )\n\n ax[j, i].xaxis.axis_label = flat_var_names[i]\n ax[j, i].yaxis.axis_label = flat_var_names[j + 1]\n\n show_layout(ax, show)\n\n return ax\n", "path": "arviz/plots/backends/bokeh/pairplot.py"}]} | 2,565 | 827 |
gh_patches_debug_40783 | rasdani/github-patches | git_diff | iterative__dvc-4848 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc version: show external cache and remotes being used
This would have made my life easier when investigating https://github.com/iterative/dvc/pull/4570.
Another question to ask is that `dvc version` can only have a limited amount of information. Should there be `dvc version --json`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/tree/__init__.py`
Content:
```
1 import posixpath
2 from urllib.parse import urlparse
3
4 from .azure import AzureTree
5 from .gdrive import GDriveTree
6 from .gs import GSTree
7 from .hdfs import HDFSTree
8 from .http import HTTPTree
9 from .https import HTTPSTree
10 from .local import LocalTree
11 from .oss import OSSTree
12 from .s3 import S3Tree
13 from .ssh import SSHTree
14 from .webdav import WebDAVTree
15 from .webdavs import WebDAVSTree
16 from .webhdfs import WebHDFSTree
17
18 TREES = [
19 AzureTree,
20 GDriveTree,
21 GSTree,
22 HDFSTree,
23 HTTPTree,
24 HTTPSTree,
25 S3Tree,
26 SSHTree,
27 OSSTree,
28 WebDAVTree,
29 WebDAVSTree,
30 WebHDFSTree
31 # NOTE: LocalTree is the default
32 ]
33
34
35 def _get_tree(remote_conf):
36 for tree_cls in TREES:
37 if tree_cls.supported(remote_conf):
38 return tree_cls
39 return LocalTree
40
41
42 def _get_conf(repo, **kwargs):
43 name = kwargs.get("name")
44 if name:
45 remote_conf = repo.config["remote"][name.lower()]
46 else:
47 remote_conf = kwargs
48 return _resolve_remote_refs(repo, remote_conf)
49
50
51 def _resolve_remote_refs(repo, remote_conf):
52 # Support for cross referenced remotes.
53 # This will merge the settings, shadowing base ref with remote_conf.
54 # For example, having:
55 #
56 # dvc remote add server ssh://localhost
57 # dvc remote modify server user root
58 # dvc remote modify server ask_password true
59 #
60 # dvc remote add images remote://server/tmp/pictures
61 # dvc remote modify images user alice
62 # dvc remote modify images ask_password false
63 # dvc remote modify images password asdf1234
64 #
65 # Results on a config dictionary like:
66 #
67 # {
68 # "url": "ssh://localhost/tmp/pictures",
69 # "user": "alice",
70 # "password": "asdf1234",
71 # "ask_password": False,
72 # }
73 parsed = urlparse(remote_conf["url"])
74 if parsed.scheme != "remote":
75 return remote_conf
76
77 base = _get_conf(repo, name=parsed.netloc)
78 url = posixpath.join(base["url"], parsed.path.lstrip("/"))
79 return {**base, **remote_conf, "url": url}
80
81
82 def get_cloud_tree(repo, **kwargs):
83 from dvc.config import SCHEMA, ConfigError, Invalid
84
85 remote_conf = _get_conf(repo, **kwargs)
86 try:
87 remote_conf = SCHEMA["remote"][str](remote_conf)
88 except Invalid as exc:
89 raise ConfigError(str(exc)) from None
90 return _get_tree(remote_conf)(repo, remote_conf)
91
```
Path: `dvc/info.py`
Content:
```
1 import itertools
2 import os
3 import pathlib
4 import platform
5 import uuid
6
7 from dvc.exceptions import DvcException, NotDvcRepoError
8 from dvc.repo import Repo
9 from dvc.scm.base import SCMError
10 from dvc.system import System
11 from dvc.tree import TREES
12 from dvc.utils import error_link
13 from dvc.utils.pkg import PKG
14 from dvc.version import __version__
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 if PKG is None:
22 package = ""
23 else:
24 package = f"({PKG})"
25
26
27 def get_dvc_info():
28 info = [
29 f"DVC version: {__version__} {package}",
30 "---------------------------------",
31 f"Platform: Python {platform.python_version()} on "
32 f"{platform.platform()}",
33 f"Supports: {_get_supported_remotes()}",
34 ]
35
36 try:
37 repo = Repo()
38
39 # cache_dir might not exist yet (e.g. after `dvc init`), and we
40 # can't auto-create it, as it might cause issues if the user
41 # later decides to enable shared cache mode with
42 # `dvc config cache.shared group`.
43 if os.path.exists(repo.cache.local.cache_dir):
44 info.append(
45 "Cache types: {}".format(_get_linktype_support_info(repo))
46 )
47 if psutil:
48 fs_type = get_fs_type(repo.cache.local.cache_dir)
49 info.append(f"Cache directory: {fs_type}")
50 else:
51 info.append("Cache types: " + error_link("no-dvc-cache"))
52
53 except NotDvcRepoError:
54 pass
55 except SCMError:
56 info.append("Repo: dvc, git (broken)")
57 else:
58 root_directory = repo.root_dir
59 if psutil:
60 fs_root = get_fs_type(os.path.abspath(root_directory))
61 info.append(f"Workspace directory: {fs_root}")
62 info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
63 return "\n".join(info)
64
65
66 def _get_linktype_support_info(repo):
67
68 links = {
69 "reflink": (System.reflink, None),
70 "hardlink": (System.hardlink, System.is_hardlink),
71 "symlink": (System.symlink, System.is_symlink),
72 }
73
74 fname = "." + str(uuid.uuid4())
75 src = os.path.join(repo.cache.local.cache_dir, fname)
76 open(src, "w").close()
77 dst = os.path.join(repo.root_dir, fname)
78
79 cache = []
80
81 for name, (link, is_link) in links.items():
82 try:
83 link(src, dst)
84 status = "supported"
85 if is_link and not is_link(dst):
86 status = "broken"
87 os.unlink(dst)
88 except DvcException:
89 status = "not supported"
90
91 if status == "supported":
92 cache.append(name)
93 os.remove(src)
94
95 return ", ".join(cache)
96
97
98 def _get_supported_remotes():
99
100 supported_remotes = []
101 for tree_cls in TREES:
102 if not tree_cls.get_missing_deps():
103 supported_remotes.append(tree_cls.scheme)
104
105 if len(supported_remotes) == len(TREES):
106 return "All remotes"
107
108 if len(supported_remotes) == 1:
109 return supported_remotes
110
111 return ", ".join(supported_remotes)
112
113
114 def get_fs_type(path):
115
116 partition = {
117 pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
118 for part in psutil.disk_partitions(all=True)
119 }
120
121 path = pathlib.Path(path)
122
123 for parent in itertools.chain([path], path.parents):
124 if parent in partition:
125 return partition[parent]
126 return ("unknown", "none")
127
128
129 def _get_dvc_repo_info(self):
130 if self.config.get("core", {}).get("no_scm", False):
131 return "dvc (no_scm)"
132
133 if self.root_dir != self.scm.root_dir:
134 return "dvc (subdir), git"
135
136 return "dvc, git"
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/info.py b/dvc/info.py
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -8,7 +8,7 @@
from dvc.repo import Repo
from dvc.scm.base import SCMError
from dvc.system import System
-from dvc.tree import TREES
+from dvc.tree import TREES, get_tree_cls, get_tree_config
from dvc.utils import error_link
from dvc.utils.pkg import PKG
from dvc.version import __version__
@@ -50,6 +50,10 @@
else:
info.append("Cache types: " + error_link("no-dvc-cache"))
+ info.append(f"Caches: {_get_caches(repo.cache)}")
+
+ info.append(f"Remotes: {_get_remotes(repo.config)}")
+
except NotDvcRepoError:
pass
except SCMError:
@@ -63,6 +67,26 @@
return "\n".join(info)
+def _get_caches(cache):
+ caches = (
+ cache_type
+ for cache_type, cache_instance in cache.by_scheme()
+ if cache_instance
+ )
+
+ # Caches will be always non-empty including the local cache
+ return ", ".join(caches)
+
+
+def _get_remotes(config):
+ schemes = (
+ get_tree_cls(get_tree_config(config, name=remote)).scheme
+ for remote in config["remote"]
+ )
+
+ return ", ".join(schemes) or "None"
+
+
def _get_linktype_support_info(repo):
links = {
diff --git a/dvc/tree/__init__.py b/dvc/tree/__init__.py
--- a/dvc/tree/__init__.py
+++ b/dvc/tree/__init__.py
@@ -32,23 +32,23 @@
]
-def _get_tree(remote_conf):
+def get_tree_cls(remote_conf):
for tree_cls in TREES:
if tree_cls.supported(remote_conf):
return tree_cls
return LocalTree
-def _get_conf(repo, **kwargs):
+def get_tree_config(config, **kwargs):
name = kwargs.get("name")
if name:
- remote_conf = repo.config["remote"][name.lower()]
+ remote_conf = config["remote"][name.lower()]
else:
remote_conf = kwargs
- return _resolve_remote_refs(repo, remote_conf)
+ return _resolve_remote_refs(config, remote_conf)
-def _resolve_remote_refs(repo, remote_conf):
+def _resolve_remote_refs(config, remote_conf):
# Support for cross referenced remotes.
# This will merge the settings, shadowing base ref with remote_conf.
# For example, having:
@@ -74,7 +74,7 @@
if parsed.scheme != "remote":
return remote_conf
- base = _get_conf(repo, name=parsed.netloc)
+ base = get_tree_config(config, name=parsed.netloc)
url = posixpath.join(base["url"], parsed.path.lstrip("/"))
return {**base, **remote_conf, "url": url}
@@ -82,9 +82,9 @@
def get_cloud_tree(repo, **kwargs):
from dvc.config import SCHEMA, ConfigError, Invalid
- remote_conf = _get_conf(repo, **kwargs)
+ remote_conf = get_tree_config(repo.config, **kwargs)
try:
remote_conf = SCHEMA["remote"][str](remote_conf)
except Invalid as exc:
raise ConfigError(str(exc)) from None
- return _get_tree(remote_conf)(repo, remote_conf)
+ return get_tree_cls(remote_conf)(repo, remote_conf)
| {"golden_diff": "diff --git a/dvc/info.py b/dvc/info.py\n--- a/dvc/info.py\n+++ b/dvc/info.py\n@@ -8,7 +8,7 @@\n from dvc.repo import Repo\n from dvc.scm.base import SCMError\n from dvc.system import System\n-from dvc.tree import TREES\n+from dvc.tree import TREES, get_tree_cls, get_tree_config\n from dvc.utils import error_link\n from dvc.utils.pkg import PKG\n from dvc.version import __version__\n@@ -50,6 +50,10 @@\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n \n+ info.append(f\"Caches: {_get_caches(repo.cache)}\")\n+\n+ info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n+\n except NotDvcRepoError:\n pass\n except SCMError:\n@@ -63,6 +67,26 @@\n return \"\\n\".join(info)\n \n \n+def _get_caches(cache):\n+ caches = (\n+ cache_type\n+ for cache_type, cache_instance in cache.by_scheme()\n+ if cache_instance\n+ )\n+\n+ # Caches will be always non-empty including the local cache\n+ return \", \".join(caches)\n+\n+\n+def _get_remotes(config):\n+ schemes = (\n+ get_tree_cls(get_tree_config(config, name=remote)).scheme\n+ for remote in config[\"remote\"]\n+ )\n+\n+ return \", \".join(schemes) or \"None\"\n+\n+\n def _get_linktype_support_info(repo):\n \n links = {\ndiff --git a/dvc/tree/__init__.py b/dvc/tree/__init__.py\n--- a/dvc/tree/__init__.py\n+++ b/dvc/tree/__init__.py\n@@ -32,23 +32,23 @@\n ]\n \n \n-def _get_tree(remote_conf):\n+def get_tree_cls(remote_conf):\n for tree_cls in TREES:\n if tree_cls.supported(remote_conf):\n return tree_cls\n return LocalTree\n \n \n-def _get_conf(repo, **kwargs):\n+def get_tree_config(config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n- remote_conf = repo.config[\"remote\"][name.lower()]\n+ remote_conf = config[\"remote\"][name.lower()]\n else:\n remote_conf = kwargs\n- return _resolve_remote_refs(repo, remote_conf)\n+ return _resolve_remote_refs(config, remote_conf)\n \n \n-def _resolve_remote_refs(repo, remote_conf):\n+def _resolve_remote_refs(config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n@@ -74,7 +74,7 @@\n if parsed.scheme != \"remote\":\n return remote_conf\n \n- base = _get_conf(repo, name=parsed.netloc)\n+ base = get_tree_config(config, name=parsed.netloc)\n url = posixpath.join(base[\"url\"], parsed.path.lstrip(\"/\"))\n return {**base, **remote_conf, \"url\": url}\n \n@@ -82,9 +82,9 @@\n def get_cloud_tree(repo, **kwargs):\n from dvc.config import SCHEMA, ConfigError, Invalid\n \n- remote_conf = _get_conf(repo, **kwargs)\n+ remote_conf = get_tree_config(repo.config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise ConfigError(str(exc)) from None\n- return _get_tree(remote_conf)(repo, remote_conf)\n+ return get_tree_cls(remote_conf)(repo, remote_conf)\n", "issue": "dvc version: show external cache and remotes being used\nThis would have made my life easier when investigating https://github.com/iterative/dvc/pull/4570.\r\n\r\nAnother question to ask is that `dvc version` can only have a limited amount of information. 
Should there be `dvc version --json`?\r\n\r\n\n", "before_files": [{"content": "import posixpath\nfrom urllib.parse import urlparse\n\nfrom .azure import AzureTree\nfrom .gdrive import GDriveTree\nfrom .gs import GSTree\nfrom .hdfs import HDFSTree\nfrom .http import HTTPTree\nfrom .https import HTTPSTree\nfrom .local import LocalTree\nfrom .oss import OSSTree\nfrom .s3 import S3Tree\nfrom .ssh import SSHTree\nfrom .webdav import WebDAVTree\nfrom .webdavs import WebDAVSTree\nfrom .webhdfs import WebHDFSTree\n\nTREES = [\n AzureTree,\n GDriveTree,\n GSTree,\n HDFSTree,\n HTTPTree,\n HTTPSTree,\n S3Tree,\n SSHTree,\n OSSTree,\n WebDAVTree,\n WebDAVSTree,\n WebHDFSTree\n # NOTE: LocalTree is the default\n]\n\n\ndef _get_tree(remote_conf):\n for tree_cls in TREES:\n if tree_cls.supported(remote_conf):\n return tree_cls\n return LocalTree\n\n\ndef _get_conf(repo, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n remote_conf = repo.config[\"remote\"][name.lower()]\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, remote_conf)\n\n\ndef _resolve_remote_refs(repo, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = _get_conf(repo, name=parsed.netloc)\n url = posixpath.join(base[\"url\"], parsed.path.lstrip(\"/\"))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_tree(repo, **kwargs):\n from dvc.config import SCHEMA, ConfigError, Invalid\n\n remote_conf = _get_conf(repo, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise ConfigError(str(exc)) from None\n return _get_tree(remote_conf)(repo, remote_conf)\n", "path": "dvc/tree/__init__.py"}, {"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}], "after_files": [{"content": "import posixpath\nfrom urllib.parse import urlparse\n\nfrom .azure import AzureTree\nfrom .gdrive import GDriveTree\nfrom .gs import GSTree\nfrom .hdfs import HDFSTree\nfrom .http import HTTPTree\nfrom .https import HTTPSTree\nfrom .local import LocalTree\nfrom .oss import OSSTree\nfrom .s3 import S3Tree\nfrom .ssh import SSHTree\nfrom .webdav import WebDAVTree\nfrom .webdavs import WebDAVSTree\nfrom .webhdfs import WebHDFSTree\n\nTREES = [\n AzureTree,\n GDriveTree,\n GSTree,\n HDFSTree,\n HTTPTree,\n HTTPSTree,\n S3Tree,\n SSHTree,\n OSSTree,\n WebDAVTree,\n WebDAVSTree,\n WebHDFSTree\n # NOTE: LocalTree is the default\n]\n\n\ndef get_tree_cls(remote_conf):\n for tree_cls in TREES:\n if tree_cls.supported(remote_conf):\n return tree_cls\n return LocalTree\n\n\ndef get_tree_config(config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n remote_conf = config[\"remote\"][name.lower()]\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(config, remote_conf)\n\n\ndef _resolve_remote_refs(config, remote_conf):\n # 
Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_tree_config(config, name=parsed.netloc)\n url = posixpath.join(base[\"url\"], parsed.path.lstrip(\"/\"))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_tree(repo, **kwargs):\n from dvc.config import SCHEMA, ConfigError, Invalid\n\n remote_conf = get_tree_config(repo.config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise ConfigError(str(exc)) from None\n return get_tree_cls(remote_conf)(repo, remote_conf)\n", "path": "dvc/tree/__init__.py"}, {"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}]} | 2,368 | 809 |
gh_patches_debug_51335 | rasdani/github-patches | git_diff | beetbox__beets-1650 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plexupdate plugin crashed
Every time after import plexupdate plugin crashed with this error:
```
Traceback (most recent call last):
File "/usr/local/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1163, in main
_raw_main(args)
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1155, in _raw_main
plugins.send('cli_exit', lib=lib)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 458, in send
result = handler(**arguments)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 123, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 87, in update
config['plex']['library_name'].get())
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 41, in update_plex
section_key = get_music_section(host, port, token, library_name)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 31, in get_music_section
tree = ET.fromstring(r.raw)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1640, in feed
self._parser.Parse(data, 0)
TypeError: must be string or read-only buffer, not HTTPResponse
```
--- END ISSUE ---
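The traceback bottoms out in `ET.fromstring`, which only accepts a `str` or `bytes` payload; `r.raw` is the underlying urllib3 `HTTPResponse` object, which the XML parser cannot consume. A minimal sketch of the distinction is below; the URL is a placeholder and assumes a reachable Plex server, so treat it as illustrative rather than a drop-in fix.

```python
import xml.etree.ElementTree as ET

import requests

r = requests.get("http://localhost:32400/library/sections")

# ET.fromstring(r.raw)           # fails: r.raw is a file-like HTTPResponse, not str/bytes
tree = ET.fromstring(r.content)  # r.content is bytes; the XML parser accepts bytes directly

for directory in tree.findall("Directory"):
    print(directory.get("title"), directory.get("key"))
```

Passing `r.content` (bytes) rather than `r.text` also sidesteps encoding guesses by `requests` when the XML document declares its own encoding.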
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/plexupdate.py`
Content:
```
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token, library_name):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == library_name:
34 return child.get('key')
35
36
37 def update_plex(host, port, token, library_name):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token, library_name)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u'',
68 u'library_name': u'Music'})
69
70 self.register_listener('database_change', self.listen_for_db_change)
71
72 def listen_for_db_change(self, lib, model):
73 """Listens for beets db change and register the update for the end"""
74 self.register_listener('cli_exit', self.update)
75
76 def update(self, lib):
77 """When the client exists try to send refresh request to Plex server.
78 """
79 self._log.info('Updating Plex library...')
80
81 # Try to send update request.
82 try:
83 update_plex(
84 config['plex']['host'].get(),
85 config['plex']['port'].get(),
86 config['plex']['token'].get(),
87 config['plex']['library_name'].get())
88 self._log.info('... started.')
89
90 except requests.exceptions.RequestException:
91 self._log.warning('Update failed.')
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -28,7 +28,7 @@
r = requests.get(url)
# Parse xml tree and extract music section key.
- tree = ET.fromstring(r.text)
+ tree = ET.fromstring(r.content)
for child in tree.findall('Directory'):
if child.get('title') == library_name:
return child.get('key')
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -28,7 +28,7 @@\n r = requests.get(url)\n \n # Parse xml tree and extract music section key.\n- tree = ET.fromstring(r.text)\n+ tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n", "issue": "plexupdate plugin crashed\nEvery time after import plexupdate plugin crashed with this error:\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1163, in main\n _raw_main(args)\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1155, in _raw_main\n plugins.send('cli_exit', lib=lib)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 458, in send\n result = handler(**arguments)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 123, in wrapper\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 87, in update\n config['plex']['library_name'].get())\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 41, in update_plex\n section_key = get_music_section(host, port, token, library_name)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 31, in get_music_section\n tree = ET.fromstring(r.raw)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1640, in feed\n self._parser.Parse(data, 0)\nTypeError: must be string or read-only buffer, not HTTPResponse\n```\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n 
url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}], "after_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' 
+ urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,528 | 118 |
gh_patches_debug_34669 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-125 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy
We decided that PlasmaPy will only be supported for:
- Python version > 3.6
- Astropy version > 2.0
- NumPy version > 1.13
However, when I try to run:
```ShellSession
python setup.py install
```
from the command line with Python 3.5, I get a `SyntaxError` for syntax that is new in version 3.6.
When I try to run
```Python
import plasmapy
```
in Python 3.6 with Astropy 1.3.1, I get an exception, since one of the constants imported from Astropy was renamed in 2.0.
We should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.
Thank you!
Nick
--- END ISSUE ---
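A common shape for this kind of guard is a version check near the top of the package's `__init__.py` that raises `ImportError` with a readable message. The snippet below is a hedged sketch: the constant names, the simplistic version parsing, and the message wording are illustrative assumptions, not PlasmaPy's actual implementation.

```python
import sys

_MIN_PYTHON = (3, 6)
_MIN_NUMPY = (1, 13)


def _version_tuple(version):
    # Naive parse: good enough for "1.13.0"-style version strings in this sketch.
    return tuple(int(part) for part in version.split(".")[:2])


if sys.version_info < _MIN_PYTHON:
    raise ImportError(
        "PlasmaPy requires Python >= 3.6; found "
        + ".".join(str(v) for v in sys.version_info[:3])
    )

try:
    import numpy
except ImportError:
    raise ImportError("PlasmaPy requires NumPy >= 1.13, but NumPy is not installed.")

if _version_tuple(numpy.__version__) < _MIN_NUMPY:
    raise ImportError("PlasmaPy requires NumPy >= 1.13; found " + numpy.__version__)
```

The same pattern extends to Astropy by checking `astropy.__version__` against the 2.0 minimum.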
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/__init__.py`
Content:
```
1 from ._metadata import (
2 name as __name__,
3 version as __version__,
4 description as __doc__,
5 author as __author__,
6 )
7
8 from .classes import Plasma
9 from . import classes
10 from . import constants
11 from . import atomic
12 from . import math
13 from . import physics
14 from . import utils
15
16 import sys
17 import warnings
18
19 if sys.version_info[:2] < (3, 6): # coveralls: ignore
20 warnings.warn("PlasmaPy does not support Python 3.5 and below")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -5,16 +5,81 @@
author as __author__,
)
-from .classes import Plasma
-from . import classes
-from . import constants
-from . import atomic
-from . import math
-from . import physics
-from . import utils
-
import sys
import warnings
-if sys.version_info[:2] < (3, 6): # coveralls: ignore
+__minimum_python_version__ = '3.6'
+__minimum_numpy_version__ = '1.13.0'
+__minimum_astropy_version__ = '2.0.0'
+
+
+def _split_version(version):
+ return tuple(int(ver) for ver in version.split('.'))
+
+
+def _min_required_version(required, current): # coveralls: ignore
+ """ Return `True` if the current version meets the required minimum
+ version and `False` if not/ if not installed.
+
+ Right now `required` and `current` are just '.' separated strings
+ but it would be good to make this more general and accept modules.
+ """
+ return _split_version(current) >= _split_version(required)
+
+
+def _check_numpy_version(): # coveralls: ignore
+ """ Make sure numpy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ np_ver = None
+
+ try:
+ from numpy import __version__ as np_ver
+ required_version = _min_required_version(__minimum_numpy_version__,
+ np_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Numpy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_numpy_version__, np_ver)
+ raise ImportError(ver_error)
+
+
+def _check_astropy_version(): # coveralls: ignore
+ """ Make sure astropy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ ap_ver = None
+
+ try:
+ from astropy import __version__ as ap_ver
+ required_version = _min_required_version(__minimum_astropy_version__,
+ ap_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Astropy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_astropy_version__, ap_ver)
+ raise ImportError(ver_error)
+
+
+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore
warnings.warn("PlasmaPy does not support Python 3.5 and below")
+
+_check_numpy_version()
+_check_astropy_version()
+
+try:
+ from .classes import Plasma
+ from . import classes
+ from . import constants
+ from . import atomic
+ from . import math
+ from . import physics
+ from . import utils
+except Exception:
+ raise ImportError("Unable to load PlasmaPy subpackages.")
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -5,16 +5,81 @@\n author as __author__,\n )\n \n-from .classes import Plasma\n-from . import classes\n-from . import constants\n-from . import atomic\n-from . import math\n-from . import physics\n-from . import utils\n-\n import sys\n import warnings\n \n-if sys.version_info[:2] < (3, 6): # coveralls: ignore\n+__minimum_python_version__ = '3.6'\n+__minimum_numpy_version__ = '1.13.0'\n+__minimum_astropy_version__ = '2.0.0'\n+\n+\n+def _split_version(version):\n+ return tuple(int(ver) for ver in version.split('.'))\n+\n+\n+def _min_required_version(required, current): # coveralls: ignore\n+ \"\"\" Return `True` if the current version meets the required minimum\n+ version and `False` if not/ if not installed.\n+\n+ Right now `required` and `current` are just '.' separated strings\n+ but it would be good to make this more general and accept modules.\n+ \"\"\"\n+ return _split_version(current) >= _split_version(required)\n+\n+\n+def _check_numpy_version(): # coveralls: ignore\n+ \"\"\" Make sure numpy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ np_ver = None\n+\n+ try:\n+ from numpy import __version__ as np_ver\n+ required_version = _min_required_version(__minimum_numpy_version__,\n+ np_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_numpy_version__, np_ver)\n+ raise ImportError(ver_error)\n+\n+\n+def _check_astropy_version(): # coveralls: ignore\n+ \"\"\" Make sure astropy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ ap_ver = None\n+\n+ try:\n+ from astropy import __version__ as ap_ver\n+ required_version = _min_required_version(__minimum_astropy_version__,\n+ ap_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_astropy_version__, ap_ver)\n+ raise ImportError(ver_error)\n+\n+\n+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n+\n+_check_numpy_version()\n+_check_astropy_version()\n+\n+try:\n+ from .classes import Plasma\n+ from . import classes\n+ from . import constants\n+ from . import atomic\n+ from . import math\n+ from . import physics\n+ from . import utils\n+except Exception:\n+ raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "issue": "Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy\nWe decided that PlasmaPy will only be supported for:\r\n- Python version > 3.6\r\n- Astropy version > 2.0\r\n- NumPy version > 1.13\r\n\r\nHowever, when I try to run:\r\n```ShellSession\r\npython setup.py install\r\n```\r\nfrom the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.\r\n\r\nWhen I try to run\r\n```Python\r\nimport plasmapy\r\n```\r\nin Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.\r\n\r\nWe should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. 
We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.\r\n\r\nThank you!\r\nNick\n", "before_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nfrom .classes import Plasma\nfrom . import classes\nfrom . import constants\nfrom . import atomic\nfrom . import math\nfrom . import physics\nfrom . import utils\n\nimport sys\nimport warnings\n\nif sys.version_info[:2] < (3, 6): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n", "path": "plasmapy/__init__.py"}], "after_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nimport sys\nimport warnings\n\n__minimum_python_version__ = '3.6'\n__minimum_numpy_version__ = '1.13.0'\n__minimum_astropy_version__ = '2.0.0'\n\n\ndef _split_version(version):\n return tuple(int(ver) for ver in version.split('.'))\n\n\ndef _min_required_version(required, current): # coveralls: ignore\n \"\"\" Return `True` if the current version meets the required minimum\n version and `False` if not/ if not installed.\n\n Right now `required` and `current` are just '.' separated strings\n but it would be good to make this more general and accept modules.\n \"\"\"\n return _split_version(current) >= _split_version(required)\n\n\ndef _check_numpy_version(): # coveralls: ignore\n \"\"\" Make sure numpy in installed and meets the minimum version requirements\n \"\"\"\n required_version = False\n np_ver = None\n\n try:\n from numpy import __version__ as np_ver\n required_version = _min_required_version(__minimum_numpy_version__,\n np_ver)\n except ImportError:\n pass\n\n if not required_version:\n ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n \"currently installed version is {}\"\n ).format(__minimum_numpy_version__, np_ver)\n raise ImportError(ver_error)\n\n\ndef _check_astropy_version(): # coveralls: ignore\n \"\"\" Make sure astropy in installed and meets the minimum version requirements\n \"\"\"\n required_version = False\n ap_ver = None\n\n try:\n from astropy import __version__ as ap_ver\n required_version = _min_required_version(__minimum_astropy_version__,\n ap_ver)\n except ImportError:\n pass\n\n if not required_version:\n ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n \"currently installed version is {}\"\n ).format(__minimum_astropy_version__, ap_ver)\n raise ImportError(ver_error)\n\n\nif (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n\n_check_numpy_version()\n_check_astropy_version()\n\ntry:\n from .classes import Plasma\n from . import classes\n from . import constants\n from . import atomic\n from . import math\n from . import physics\n from . import utils\nexcept Exception:\n raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "path": "plasmapy/__init__.py"}]} | 656 | 720 |
gh_patches_debug_36409 | rasdani/github-patches | git_diff | streamlit__streamlit-682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Streamlit 0.49.0 on Windows + Python 3.8 fails to execute (Tornado error)
# Summary
Streamlit fails to execute on Windows under Python 3.8 due to a bug in Tornado. The version of Tornado pinned in streamlit 0.49 was 5.x, while the latest version of Tornado at the time of this bug was 6.0.3. [A similar bug was found in IPython notebook.](https://stackoverflow.com/questions/58422817/jupyter-notebook-with-python-3-8-notimplementederror)
# Steps to reproduce
What are the steps we should take to reproduce the bug:
1. Setup Python 3.8 virtualenv on Windows
2. Install streamlit 0.49.0
3. streamlit hello
## Expected behavior:
Streamlit hello should run.
## Actual behavior:
Streamlit fails to execute, spitting out the following (tail end of traceback -- [see full traceback here](https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724)):
```
self._handlers[sock.fileno()] = add_accept_handler(
File “c:\users\admin\appdata\local\programs\python\python38-32\lib\site-packages\tornado\netutil.py”, line 268, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File “c:\users\admin\appdata\local\programs\python\python38-32\lib\site-packages\tornado\platform\asyncio.py”, line 79, in add_handler
self.asyncio_loop.add_reader(
File “c:\users\admin\appdata\local\programs\python\python38-32\lib\asyncio\events.py”, line 501, in add_reader
raise NotImplementedError
NotImplementedError
```
## Is this a regression?
No; Python 3.8 hasn't been officially supported in Streamlit to date. (But it _should_ work.)
# Debug info
- Streamlit version: 0.49.0
- Python version: 3.8
- Using Conda? PipEnv? PyEnv? Pex? any
- OS version: Windows (probably any)
- Browser version: n/a
# Additional information
Using Python 3.7.5 is the recommended solution for now. See https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724/4
--- END ISSUE ---
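The `NotImplementedError` comes from `loop.add_reader()`: on Windows, Python 3.8 switched the default asyncio event loop to `ProactorEventLoop`, which does not implement `add_reader`, and Tornado depends on it. The usual workaround is to select the older selector-based policy before Tornado starts its IOLoop; the snippet below is a standalone sketch of that idea, not necessarily how the project wires it in.

```python
import asyncio
import sys

if sys.platform == "win32" and sys.version_info >= (3, 8):
    # Tornado needs loop.add_reader(), which the default ProactorEventLoop lacks,
    # so fall back to the selector-based policy that pre-3.8 Windows defaulted to.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
```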
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/bootstrap.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2018-2019 Streamlit Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import os
17 import signal
18 import sys
19
20 import click
21 import tornado.ioloop
22
23 from streamlit import config
24 from streamlit import net_util
25 from streamlit import url_util
26 from streamlit import util
27 from streamlit.Report import Report
28 from streamlit.logger import get_logger
29 from streamlit.server.Server import Server
30
31 LOGGER = get_logger(__name__)
32
33
34 # Wait for 1 second before opening a browser. This gives old tabs a chance to
35 # reconnect.
36 # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
37 BROWSER_WAIT_TIMEOUT_SEC = 1
38
39
40 def _set_up_signal_handler():
41 LOGGER.debug("Setting up signal handler")
42
43 def signal_handler(signal_number, stack_frame):
44 # The server will shut down its threads and stop the ioloop
45 Server.get_current().stop()
46
47 signal.signal(signal.SIGTERM, signal_handler)
48 signal.signal(signal.SIGINT, signal_handler)
49 if sys.platform == "win32":
50 signal.signal(signal.SIGBREAK, signal_handler)
51 else:
52 signal.signal(signal.SIGQUIT, signal_handler)
53
54
55 def _fix_sys_path(script_path):
56 """Add the script's folder to the sys path.
57
58 Python normally does this automatically, but since we exec the script
59 ourselves we need to do it instead.
60 """
61 sys.path.insert(0, os.path.dirname(script_path))
62
63
64 def _fix_matplotlib_crash():
65 """Set Matplotlib backend to avoid a crash.
66
67 The default Matplotlib backend crashes Python on OSX when run on a thread
68 that's not the main thread, so here we set a safer backend as a fix.
69 Users can always disable this behavior by setting the config
70 runner.fixMatplotlib = false.
71
72 This fix is OS-independent. We didn't see a good reason to make this
73 Mac-only. Consistency within Streamlit seemed more important.
74 """
75 if config.get_option("runner.fixMatplotlib"):
76 try:
77 # TODO: a better option may be to set
78 # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards
79 # the top of __init__.py, before importing anything that imports
80 # pandas (which imports matplotlib). Alternately, we could set
81 # this environment variable in a new entrypoint defined in
82 # setup.py. Both of these introduce additional trickiness: they
83 # need to run without consulting streamlit.config.get_option,
84 # because this would import streamlit, and therefore matplotlib.
85 import matplotlib
86
87 matplotlib.use("Agg")
88 except ImportError:
89 pass
90
91
92 def _fix_sys_argv(script_path, args):
93 """sys.argv needs to exclude streamlit arguments and parameters
94 and be set to what a user's script may expect.
95 """
96 import sys
97
98 sys.argv = [script_path] + list(args)
99
100
101 def _on_server_start(server):
102 _print_url()
103
104 def maybe_open_browser():
105 if config.get_option("server.headless"):
106 # Don't open browser when in headless mode.
107 return
108
109 if server.browser_is_connected:
110 # Don't auto-open browser if there's already a browser connected.
111 # This can happen if there's an old tab repeatedly trying to
112 # connect, and it happens to success before we launch the browser.
113 return
114
115 if config.is_manually_set("browser.serverAddress"):
116 addr = config.get_option("browser.serverAddress")
117 else:
118 addr = "localhost"
119
120 util.open_browser(Report.get_url(addr))
121
122 # Schedule the browser to open using the IO Loop on the main thread, but
123 # only if no other browser connects within 1s.
124 ioloop = tornado.ioloop.IOLoop.current()
125 ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
126
127
128 def _print_url():
129 title_message = "You can now view your Streamlit app in your browser."
130 named_urls = []
131
132 if config.is_manually_set("browser.serverAddress"):
133 named_urls = [
134 ("URL", Report.get_url(config.get_option("browser.serverAddress")))
135 ]
136
137 elif config.get_option("server.headless"):
138 named_urls = [
139 ("Network URL", Report.get_url(net_util.get_internal_ip())),
140 ("External URL", Report.get_url(net_util.get_external_ip())),
141 ]
142
143 else:
144 named_urls = [
145 ("Local URL", Report.get_url("localhost")),
146 ("Network URL", Report.get_url(net_util.get_internal_ip())),
147 ]
148
149 click.secho("")
150 click.secho(" %s" % title_message, fg="blue", bold=True)
151 click.secho("")
152
153 for url_name, url in named_urls:
154 url_util.print_url(url_name, url)
155
156 click.secho("")
157
158
159 def run(script_path, command_line, args):
160 """Run a script in a separate thread and start a server for the app.
161
162 This starts a blocking ioloop.
163
164 Parameters
165 ----------
166 script_path : str
167 command_line : str
168 args : [str]
169
170 """
171 _fix_sys_path(script_path)
172 _fix_matplotlib_crash()
173 _fix_sys_argv(script_path, args)
174
175 # Install a signal handler that will shut down the ioloop
176 # and close all our threads
177 _set_up_signal_handler()
178
179 ioloop = tornado.ioloop.IOLoop.current()
180
181 # Create and start the server.
182 server = Server(ioloop, script_path, command_line)
183 server.start(_on_server_start)
184
185 # (Must com after start(), because this starts a new thread and start() may
186 # call sys.exit() which doesn't kill other threads.
187 server.add_preheated_report_session()
188
189 # Start the ioloop. This function will not return until the
190 # server is shut down.
191 ioloop.start()
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py
--- a/lib/streamlit/bootstrap.py
+++ b/lib/streamlit/bootstrap.py
@@ -23,6 +23,7 @@
from streamlit import config
from streamlit import net_util
from streamlit import url_util
+from streamlit import env_util
from streamlit import util
from streamlit.Report import Report
from streamlit.logger import get_logger
@@ -30,7 +31,6 @@
LOGGER = get_logger(__name__)
-
# Wait for 1 second before opening a browser. This gives old tabs a chance to
# reconnect.
# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
@@ -89,6 +89,43 @@
pass
+def _fix_tornado_crash():
+ """Set default asyncio policy to be compatible with Tornado 6.
+
+ Tornado 6 (at least) is not compatible with the default
+ asyncio implementation on Windows. So here we
+ pick the older SelectorEventLoopPolicy when the OS is Windows
+ if the known-incompatible default policy is in use.
+
+ This has to happen as early as possible to make it a low priority and
+ overrideable
+
+ See: https://github.com/tornadoweb/tornado/issues/2608
+
+ FIXME: if/when tornado supports the defaults in asyncio,
+ remove and bump tornado requirement for py38
+ """
+ if env_util.IS_WINDOWS and sys.version_info >= (3, 8):
+ import asyncio
+ try:
+ from asyncio import (
+ WindowsProactorEventLoopPolicy,
+ WindowsSelectorEventLoopPolicy,
+ )
+ except ImportError:
+ pass
+ # Not affected
+ else:
+ if (
+ type(asyncio.get_event_loop_policy()) is
+ WindowsProactorEventLoopPolicy
+ ):
+ # WindowsProactorEventLoopPolicy is not compatible with
+ # Tornado 6 fallback to the pre-3.8 default of Selector
+ asyncio.set_event_loop_policy(
+ WindowsSelectorEventLoopPolicy())
+
+
def _fix_sys_argv(script_path, args):
"""sys.argv needs to exclude streamlit arguments and parameters
and be set to what a user's script may expect.
@@ -170,6 +207,7 @@
"""
_fix_sys_path(script_path)
_fix_matplotlib_crash()
+ _fix_tornado_crash()
_fix_sys_argv(script_path, args)
# Install a signal handler that will shut down the ioloop
| {"golden_diff": "diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py\n--- a/lib/streamlit/bootstrap.py\n+++ b/lib/streamlit/bootstrap.py\n@@ -23,6 +23,7 @@\n from streamlit import config\n from streamlit import net_util\n from streamlit import url_util\n+from streamlit import env_util\n from streamlit import util\n from streamlit.Report import Report\n from streamlit.logger import get_logger\n@@ -30,7 +31,6 @@\n \n LOGGER = get_logger(__name__)\n \n-\n # Wait for 1 second before opening a browser. This gives old tabs a chance to\n # reconnect.\n # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\n@@ -89,6 +89,43 @@\n pass\n \n \n+def _fix_tornado_crash():\n+ \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n+\n+ Tornado 6 (at least) is not compatible with the default\n+ asyncio implementation on Windows. So here we\n+ pick the older SelectorEventLoopPolicy when the OS is Windows\n+ if the known-incompatible default policy is in use.\n+\n+ This has to happen as early as possible to make it a low priority and\n+ overrideable\n+\n+ See: https://github.com/tornadoweb/tornado/issues/2608\n+\n+ FIXME: if/when tornado supports the defaults in asyncio,\n+ remove and bump tornado requirement for py38\n+ \"\"\"\n+ if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n+ import asyncio\n+ try:\n+ from asyncio import (\n+ WindowsProactorEventLoopPolicy,\n+ WindowsSelectorEventLoopPolicy,\n+ )\n+ except ImportError:\n+ pass\n+ # Not affected\n+ else:\n+ if (\n+ type(asyncio.get_event_loop_policy()) is\n+ WindowsProactorEventLoopPolicy\n+ ):\n+ # WindowsProactorEventLoopPolicy is not compatible with\n+ # Tornado 6 fallback to the pre-3.8 default of Selector\n+ asyncio.set_event_loop_policy(\n+ WindowsSelectorEventLoopPolicy())\n+\n+\n def _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n@@ -170,6 +207,7 @@\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n+ _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n \n # Install a signal handler that will shut down the ioloop\n", "issue": "Streamlit 0.49.0 on Windows + Python 3.8 fails to execute (Tornado error)\n# Summary\r\nStreamlit fails to execute on Windows under Python 3.8 due to a bug in Tornado. The version of Tornado pinned in streamlit 0.49 was 5.x, while the latest version of Tornado at the time of this bug was 6.0.3. [A similar bug was found in IPython notebook.](https://stackoverflow.com/questions/58422817/jupyter-notebook-with-python-3-8-notimplementederror)\r\n\r\n# Steps to reproduce\r\nWhat are the steps we should take to reproduce the bug:\r\n1. Setup Python 3.8 virtualenv on Windows\r\n2. Install streamlit 0.49.0\r\n3. 
streamlit hello\r\n\r\n## Expected behavior:\r\nStreamlit hello should run.\r\n\r\n## Actual behavior:\r\nStreamlit fails to execute, spitting out the following (tail end of traceback -- [see full traceback here](https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724)):\r\n\r\n```\r\nself._handlers[sock.fileno()] = add_accept_handler(\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\tornado\\netutil.py\u201d, line 268, in add_accept_handler\r\nio_loop.add_handler(sock, accept_handler, IOLoop.READ)\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\site-packages\\tornado\\platform\\asyncio.py\u201d, line 79, in add_handler\r\nself.asyncio_loop.add_reader(\r\nFile \u201cc:\\users\\admin\\appdata\\local\\programs\\python\\python38-32\\lib\\asyncio\\events.py\u201d, line 501, in add_reader\r\nraise NotImplementedError\r\nNotImplementedError\r\n```\r\n\r\n## Is this a regression?\r\n\r\nNo; Python 3.8 hasn't been officially supported in Streamlit to date. (But it _should_ work.)\r\n\r\n# Debug info\r\n- Streamlit version: 0.49.0\r\n- Python version: 3.8\r\n- Using Conda? PipEnv? PyEnv? Pex? any\r\n- OS version: Windows (probably any)\r\n- Browser version: n/a\r\n\r\n# Additional information\r\n\r\nUsing Python 3.7.5 is the recommended solution for now. See https://discuss.streamlit.io/t/streamlit-issue-when-calling-streamlit/724/4\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n\n# Wait for 1 second before opening a browser. 
This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local 
URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_sys_argv(script_path, args)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. 
This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. 
So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n try:\n from asyncio import (\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if (\n type(asyncio.get_event_loop_policy()) is\n WindowsProactorEventLoopPolicy\n ):\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(\n WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill 
other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}]} | 2,706 | 584 |
gh_patches_debug_60412 | rasdani/github-patches | git_diff | coala__coala-4215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support neovim (`nvim`) as an editor
```
[WARNING][14:15:54] The editor "nvim" is unknown to coala. Files won't be opened at the correct positions and other quirks might occur. Consider opening an issue at https://github.com/coala/coala/issues so we can add support for this editor. Supported editors are: atom, emacs, emacsclient, geany, gedit, gvim, kate, nano, subl, vim, xed
```
It's basically the same as `vim` so it could be just added to the allowed editors list and given the same behavior as `vim`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/results/result_actions/OpenEditorAction.py`
Content:
```
1 import logging
2 import shlex
3 import subprocess
4 from os.path import exists
5 from os import environ
6
7 from coalib.results.Diff import Diff
8 from coalib.results.Result import Result
9 from coalib.results.result_actions.ResultAction import ResultAction
10 from coala_utils.decorators import enforce_signature
11
12
13 """
14 Data about all text editors coala knows about. New editors
15 can just be added here.
16 For each editor the following info is stored:
17 {
18 <name/comand>: {
19 "file_arg_template":
20 A string used to generate arguments to open a file.
21 Must at least have the placeholder 'filename'
22 and can optionally use 'line' and 'column'
23 to open the file at the correct position.
24 Some editors don't support opening files at
25 a certain position if multiple files are
26 to be opened, but we try to do so anyway.
27 "args":
28 General arguments added to the call, e.g. to
29 force opening of a new window.
30 "gui":
31 Boolean. True if this is a gui editor.
32 Optional, defaults to False.
33 }
34 }
35 """
36 KNOWN_EDITORS = {
37 # non-gui editors
38 'vim': {
39 'file_arg_template': '{filename} +{line}',
40 'gui': False
41 },
42 'nano': {
43 'file_arg_template': '+{line},{column} {filename} ',
44 'gui': False
45 },
46 'emacs': {
47 'file_arg_template': '+{line}:{column} {filename}',
48 'gui': False
49 },
50 'emacsclient': {
51 'file_arg_template': '+{line}:{column} {filename}',
52 'gui': False
53 },
54
55 # gui editors
56 'atom': {
57 'file_arg_template': '{filename}:{line}:{column}',
58 'args': '--wait',
59 'gui': True
60 },
61 'geany': {
62 'file_arg_template': '{filename} -l {line} --column {column}',
63 'args': '-s -i',
64 'gui': True
65 },
66 'gedit': {
67 'file_arg_template': '{filename} +{line}',
68 'args': '-s',
69 'gui': True
70 },
71 'gvim': {
72 'file_arg_template': '{filename} +{line}',
73 'gui': True
74 },
75 'kate': {
76 'file_arg_template': '{filename} -l {line} -c {column}',
77 'args': '--new',
78 'gui': True
79 },
80 'subl': {
81 'file_arg_template': '{filename}:{line}:{column}',
82 'args': '--wait',
83 'gui': True
84 },
85 'xed': {
86 'file_arg_template': '{filename} +{line}',
87 'args': '--new-window',
88 'gui': True
89 },
90 }
91
92
93 class OpenEditorAction(ResultAction):
94
95 SUCCESS_MESSAGE = 'Changes saved successfully.'
96
97 @staticmethod
98 @enforce_signature
99 def is_applicable(result: Result, original_file_dict, file_diff_dict):
100 """
101 For being applicable, the result has to point to a number of files
102 that have to exist i.e. have not been previously deleted.
103 """
104
105 if not len(result.affected_code) > 0:
106 return 'The result is not associated with any source code.'
107
108 filenames = set(src.renamed_file(file_diff_dict)
109 for src in result.affected_code)
110 if not all(exists(filename) for filename in filenames):
111 return ("The result is associated with source code that doesn't "
112 'seem to exist.')
113 return True
114
115 def build_editor_call_args(self, editor, editor_info, filenames):
116 """
117 Create argument list which will then be used to open an editor for
118 the given files at the correct positions, if applicable.
119
120 :param editor:
121 The editor to open the file with.
122 :param editor_info:
123 A dict containing the keys ``args`` and ``file_arg_template``,
124 providing additional call arguments and a template to open
125 files at a position for this editor.
126 :param filenames:
127 A dict holding one entry for each file to be opened.
128 Keys must be ``filename``, ``line`` and ``column``.
129 """
130 call_args = [editor]
131
132 # for some editors we define extra arguments
133 if 'args' in editor_info:
134 call_args += shlex.split(editor_info['args'])
135
136 # add info for each file to be opened
137 for file_info in filenames.values():
138 file_arg = editor_info['file_arg_template'].format(
139 filename=shlex.quote(file_info['filename']),
140 line=file_info['line'], column=file_info['column']
141 )
142 call_args += shlex.split(file_arg)
143
144 return call_args
145
146 def apply(self, result, original_file_dict, file_diff_dict, editor: str):
147 """
148 Open file(s)
149
150 :param editor: The editor to open the file with.
151 """
152 try:
153 editor_info = KNOWN_EDITORS[editor.strip()]
154 except KeyError:
155 # If the editor is unknown fall back to just passing
156 # the filenames and emit a warning
157 logging.warning(
158 'The editor "{editor}" is unknown to coala. Files won\'t be'
159 ' opened at the correct positions and other quirks might'
160 ' occur. Consider opening an issue at'
161 ' https://github.com/coala/coala/issues so we'
162 ' can add support for this editor.'
163 ' Supported editors are: {supported}'.format(
164 editor=editor, supported=', '.join(
165 sorted(KNOWN_EDITORS.keys())
166 )
167 )
168 )
169 editor_info = {
170 'file_arg_template': '{filename}',
171 'gui': False
172 }
173
174 # Use dict to remove duplicates
175 filenames = {
176 src.file: {
177 'filename': src.renamed_file(file_diff_dict),
178 'line': src.start.line or 1,
179 'column': src.start.column or 1
180 }
181 for src in result.affected_code
182 }
183
184 call_args = self.build_editor_call_args(editor, editor_info, filenames)
185
186 if editor_info.get('gui', True):
187 subprocess.call(call_args, stdout=subprocess.PIPE)
188 else:
189 subprocess.call(call_args)
190
191 for original_name, file_info in filenames.items():
192 filename = file_info['filename']
193 with open(filename, encoding='utf-8') as file:
194 file_diff_dict[original_name] = Diff.from_string_arrays(
195 original_file_dict[original_name], file.readlines(),
196 rename=False if original_name == filename else filename)
197
198 return file_diff_dict
199
200 if 'EDITOR' in environ:
201 apply.__defaults__ = (environ['EDITOR'],)
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/results/result_actions/OpenEditorAction.py b/coalib/results/result_actions/OpenEditorAction.py
--- a/coalib/results/result_actions/OpenEditorAction.py
+++ b/coalib/results/result_actions/OpenEditorAction.py
@@ -39,6 +39,10 @@
'file_arg_template': '{filename} +{line}',
'gui': False
},
+ 'nvim': {
+ 'file_arg_template': '{filename} +{line}',
+ 'gui': False
+ },
'nano': {
'file_arg_template': '+{line},{column} {filename} ',
'gui': False
| {"golden_diff": "diff --git a/coalib/results/result_actions/OpenEditorAction.py b/coalib/results/result_actions/OpenEditorAction.py\n--- a/coalib/results/result_actions/OpenEditorAction.py\n+++ b/coalib/results/result_actions/OpenEditorAction.py\n@@ -39,6 +39,10 @@\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n+ 'nvim': {\n+ 'file_arg_template': '{filename} +{line}',\n+ 'gui': False\n+ },\n 'nano': {\n 'file_arg_template': '+{line},{column} {filename} ',\n 'gui': False\n", "issue": "Support neovim (`nvim`) as an editor\n```\r\n[WARNING][14:15:54] The editor \"nvim\" is unknown to coala. Files won't be opened at the correct positions and other quirks might occur. Consider opening an issue at https://github.com/coala/coala/issues so we can add support for this editor. Supported editors are: atom, emacs, emacsclient, geany, gedit, gvim, kate, nano, subl, vim, xed\r\n```\r\n\r\nIt's basically the same as `vim` so it could be just added to the allowed editors list and given the same behavior as `vim`.\n", "before_files": [{"content": "import logging\nimport shlex\nimport subprocess\nfrom os.path import exists\nfrom os import environ\n\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.result_actions.ResultAction import ResultAction\nfrom coala_utils.decorators import enforce_signature\n\n\n\"\"\"\nData about all text editors coala knows about. New editors\ncan just be added here.\nFor each editor the following info is stored:\n{\n <name/comand>: {\n \"file_arg_template\":\n A string used to generate arguments to open a file.\n Must at least have the placeholder 'filename'\n and can optionally use 'line' and 'column'\n to open the file at the correct position.\n Some editors don't support opening files at\n a certain position if multiple files are\n to be opened, but we try to do so anyway.\n \"args\":\n General arguments added to the call, e.g. to\n force opening of a new window.\n \"gui\":\n Boolean. True if this is a gui editor.\n Optional, defaults to False.\n }\n}\n\"\"\"\nKNOWN_EDITORS = {\n # non-gui editors\n 'vim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n 'nano': {\n 'file_arg_template': '+{line},{column} {filename} ',\n 'gui': False\n },\n 'emacs': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n 'emacsclient': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n\n # gui editors\n 'atom': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'geany': {\n 'file_arg_template': '{filename} -l {line} --column {column}',\n 'args': '-s -i',\n 'gui': True\n },\n 'gedit': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '-s',\n 'gui': True\n },\n 'gvim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': True\n },\n 'kate': {\n 'file_arg_template': '{filename} -l {line} -c {column}',\n 'args': '--new',\n 'gui': True\n },\n 'subl': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'xed': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '--new-window',\n 'gui': True\n },\n}\n\n\nclass OpenEditorAction(ResultAction):\n\n SUCCESS_MESSAGE = 'Changes saved successfully.'\n\n @staticmethod\n @enforce_signature\n def is_applicable(result: Result, original_file_dict, file_diff_dict):\n \"\"\"\n For being applicable, the result has to point to a number of files\n that have to exist i.e. 
have not been previously deleted.\n \"\"\"\n\n if not len(result.affected_code) > 0:\n return 'The result is not associated with any source code.'\n\n filenames = set(src.renamed_file(file_diff_dict)\n for src in result.affected_code)\n if not all(exists(filename) for filename in filenames):\n return (\"The result is associated with source code that doesn't \"\n 'seem to exist.')\n return True\n\n def build_editor_call_args(self, editor, editor_info, filenames):\n \"\"\"\n Create argument list which will then be used to open an editor for\n the given files at the correct positions, if applicable.\n\n :param editor:\n The editor to open the file with.\n :param editor_info:\n A dict containing the keys ``args`` and ``file_arg_template``,\n providing additional call arguments and a template to open\n files at a position for this editor.\n :param filenames:\n A dict holding one entry for each file to be opened.\n Keys must be ``filename``, ``line`` and ``column``.\n \"\"\"\n call_args = [editor]\n\n # for some editors we define extra arguments\n if 'args' in editor_info:\n call_args += shlex.split(editor_info['args'])\n\n # add info for each file to be opened\n for file_info in filenames.values():\n file_arg = editor_info['file_arg_template'].format(\n filename=shlex.quote(file_info['filename']),\n line=file_info['line'], column=file_info['column']\n )\n call_args += shlex.split(file_arg)\n\n return call_args\n\n def apply(self, result, original_file_dict, file_diff_dict, editor: str):\n \"\"\"\n Open file(s)\n\n :param editor: The editor to open the file with.\n \"\"\"\n try:\n editor_info = KNOWN_EDITORS[editor.strip()]\n except KeyError:\n # If the editor is unknown fall back to just passing\n # the filenames and emit a warning\n logging.warning(\n 'The editor \"{editor}\" is unknown to coala. Files won\\'t be'\n ' opened at the correct positions and other quirks might'\n ' occur. Consider opening an issue at'\n ' https://github.com/coala/coala/issues so we'\n ' can add support for this editor.'\n ' Supported editors are: {supported}'.format(\n editor=editor, supported=', '.join(\n sorted(KNOWN_EDITORS.keys())\n )\n )\n )\n editor_info = {\n 'file_arg_template': '{filename}',\n 'gui': False\n }\n\n # Use dict to remove duplicates\n filenames = {\n src.file: {\n 'filename': src.renamed_file(file_diff_dict),\n 'line': src.start.line or 1,\n 'column': src.start.column or 1\n }\n for src in result.affected_code\n }\n\n call_args = self.build_editor_call_args(editor, editor_info, filenames)\n\n if editor_info.get('gui', True):\n subprocess.call(call_args, stdout=subprocess.PIPE)\n else:\n subprocess.call(call_args)\n\n for original_name, file_info in filenames.items():\n filename = file_info['filename']\n with open(filename, encoding='utf-8') as file:\n file_diff_dict[original_name] = Diff.from_string_arrays(\n original_file_dict[original_name], file.readlines(),\n rename=False if original_name == filename else filename)\n\n return file_diff_dict\n\n if 'EDITOR' in environ:\n apply.__defaults__ = (environ['EDITOR'],)\n", "path": "coalib/results/result_actions/OpenEditorAction.py"}], "after_files": [{"content": "import logging\nimport shlex\nimport subprocess\nfrom os.path import exists\nfrom os import environ\n\nfrom coalib.results.Diff import Diff\nfrom coalib.results.Result import Result\nfrom coalib.results.result_actions.ResultAction import ResultAction\nfrom coala_utils.decorators import enforce_signature\n\n\n\"\"\"\nData about all text editors coala knows about. 
New editors\ncan just be added here.\nFor each editor the following info is stored:\n{\n <name/comand>: {\n \"file_arg_template\":\n A string used to generate arguments to open a file.\n Must at least have the placeholder 'filename'\n and can optionally use 'line' and 'column'\n to open the file at the correct position.\n Some editors don't support opening files at\n a certain position if multiple files are\n to be opened, but we try to do so anyway.\n \"args\":\n General arguments added to the call, e.g. to\n force opening of a new window.\n \"gui\":\n Boolean. True if this is a gui editor.\n Optional, defaults to False.\n }\n}\n\"\"\"\nKNOWN_EDITORS = {\n # non-gui editors\n 'vim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n 'nvim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': False\n },\n 'nano': {\n 'file_arg_template': '+{line},{column} {filename} ',\n 'gui': False\n },\n 'emacs': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n 'emacsclient': {\n 'file_arg_template': '+{line}:{column} {filename}',\n 'gui': False\n },\n\n # gui editors\n 'atom': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'geany': {\n 'file_arg_template': '{filename} -l {line} --column {column}',\n 'args': '-s -i',\n 'gui': True\n },\n 'gedit': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '-s',\n 'gui': True\n },\n 'gvim': {\n 'file_arg_template': '{filename} +{line}',\n 'gui': True\n },\n 'kate': {\n 'file_arg_template': '{filename} -l {line} -c {column}',\n 'args': '--new',\n 'gui': True\n },\n 'subl': {\n 'file_arg_template': '{filename}:{line}:{column}',\n 'args': '--wait',\n 'gui': True\n },\n 'xed': {\n 'file_arg_template': '{filename} +{line}',\n 'args': '--new-window',\n 'gui': True\n },\n}\n\n\nclass OpenEditorAction(ResultAction):\n\n SUCCESS_MESSAGE = 'Changes saved successfully.'\n\n @staticmethod\n @enforce_signature\n def is_applicable(result: Result, original_file_dict, file_diff_dict):\n \"\"\"\n For being applicable, the result has to point to a number of files\n that have to exist i.e. 
have not been previously deleted.\n \"\"\"\n\n if not len(result.affected_code) > 0:\n return 'The result is not associated with any source code.'\n\n filenames = set(src.renamed_file(file_diff_dict)\n for src in result.affected_code)\n if not all(exists(filename) for filename in filenames):\n return (\"The result is associated with source code that doesn't \"\n 'seem to exist.')\n return True\n\n def build_editor_call_args(self, editor, editor_info, filenames):\n \"\"\"\n Create argument list which will then be used to open an editor for\n the given files at the correct positions, if applicable.\n\n :param editor:\n The editor to open the file with.\n :param editor_info:\n A dict containing the keys ``args`` and ``file_arg_template``,\n providing additional call arguments and a template to open\n files at a position for this editor.\n :param filenames:\n A dict holding one entry for each file to be opened.\n Keys must be ``filename``, ``line`` and ``column``.\n \"\"\"\n call_args = [editor]\n\n # for some editors we define extra arguments\n if 'args' in editor_info:\n call_args += shlex.split(editor_info['args'])\n\n # add info for each file to be opened\n for file_info in filenames.values():\n file_arg = editor_info['file_arg_template'].format(\n filename=shlex.quote(file_info['filename']),\n line=file_info['line'], column=file_info['column']\n )\n call_args += shlex.split(file_arg)\n\n return call_args\n\n def apply(self, result, original_file_dict, file_diff_dict, editor: str):\n \"\"\"\n Open file(s)\n\n :param editor: The editor to open the file with.\n \"\"\"\n try:\n editor_info = KNOWN_EDITORS[editor.strip()]\n except KeyError:\n # If the editor is unknown fall back to just passing\n # the filenames and emit a warning\n logging.warning(\n 'The editor \"{editor}\" is unknown to coala. Files won\\'t be'\n ' opened at the correct positions and other quirks might'\n ' occur. Consider opening an issue at'\n ' https://github.com/coala/coala/issues so we'\n ' can add support for this editor.'\n ' Supported editors are: {supported}'.format(\n editor=editor, supported=', '.join(\n sorted(KNOWN_EDITORS.keys())\n )\n )\n )\n editor_info = {\n 'file_arg_template': '{filename}',\n 'gui': False\n }\n\n # Use dict to remove duplicates\n filenames = {\n src.file: {\n 'filename': src.renamed_file(file_diff_dict),\n 'line': src.start.line or 1,\n 'column': src.start.column or 1\n }\n for src in result.affected_code\n }\n\n call_args = self.build_editor_call_args(editor, editor_info, filenames)\n\n if editor_info.get('gui', True):\n subprocess.call(call_args, stdout=subprocess.PIPE)\n else:\n subprocess.call(call_args)\n\n for original_name, file_info in filenames.items():\n filename = file_info['filename']\n with open(filename, encoding='utf-8') as file:\n file_diff_dict[original_name] = Diff.from_string_arrays(\n original_file_dict[original_name], file.readlines(),\n rename=False if original_name == filename else filename)\n\n return file_diff_dict\n\n if 'EDITOR' in environ:\n apply.__defaults__ = (environ['EDITOR'],)\n", "path": "coalib/results/result_actions/OpenEditorAction.py"}]} | 2,350 | 144 |
gh_patches_debug_15290 | rasdani/github-patches | git_diff | Zeroto521__my-data-toolkit-390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: shorten package path
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/geoaccessor/register.py`
Content:
```
1 from pandas.util._decorators import doc
2
3 from dtoolkit.accessor.register import register_method_factory
4 from dtoolkit.geoaccessor.accessor import register_geodataframe_accessor
5 from dtoolkit.geoaccessor.accessor import register_geoseries_accessor
6
7
8 @register_method_factory
9 @doc(klass=":class:`geopandas.GeoSeries`")
10 def register_geoseries_method(method):
11 """
12 {klass} register accessor for human.
13
14 Write method normally, use method naturally.
15
16 See Also
17 --------
18 dtoolkit.geoaccessor.accessor.register_geoseries_accessor
19 dtoolkit.geoaccessor.accessor.register_geodataframe_accessor
20 register_geoseries_method
21 register_geodataframe_method
22
23 Examples
24 --------
25 In your library code::
26
27 import geopandas as gpd
28
29 from pygeos import count_coordinates, from_shapely
30
31 @register_geodataframe_method
32 @register_geoseries_method
33 def counts(s: gpd.GeoSeries):
34 # Counts the number of coordinate pairs in geometry
35
36 func = lambda x: count_coordinates(from_shapely(x))
37 return s.geometry.apply(func)
38
39 Back in an interactive IPython session:
40
41 .. code-block:: ipython
42
43 In [1]: import geopandas as gpd
44
45 In [2]: s = gpd.GeoSeries.from_wkt(["POINT (0 0)", "POINT (1 1)", None])
46
47 In [3]: s
48 Out[3]:
49 0 POINT (0.00000 0.00000)
50 1 POINT (1.00000 1.00000)
51 2 None
52 dtype: geometry
53
54 In [4]: s.counts()
55 Out[4]:
56 0 1
57 1 1
58 2 0
59 dtype: int64
60
61 In [5]: d = s.to_frame("geometry")
62 Out[5]:
63 geometry
64 0 POINT (0.00000 0.00000)
65 1 POINT (1.00000 1.00000)
66 2 None
67
68 In [6]: d.counts()
69 Out[6]:
70 0 1
71 1 1
72 2 0
73 Name: geometry, dtype: int64
74 """
75 return register_geoseries_accessor(method)
76
77
78 @register_method_factory
79 @doc(register_geoseries_method, klass=":class:`geopandas.GeoDataFrame`")
80 def register_geodataframe_method(method):
81 return register_geodataframe_accessor(method)
82
```
Path: `dtoolkit/geoaccessor/accessor.py`
Content:
```
1 from geopandas import GeoDataFrame
2 from geopandas import GeoSeries
3 from pandas.core.accessor import _register_accessor
4 from pandas.util._decorators import doc
5
6
7 @doc(klass=":class:`geopandas.GeoSeries`")
8 def register_geoseries_accessor(name: str):
9 """
10 Register a custom accessor on {klass} objects.
11
12 This is a temparatory solution to hook method into
13 :class:`~geopandas.GeoSeries` or :class:`~geopandas.GeoDataFrame`.
14 If `geopandas#1952`_ done, it would be removed from
15 :mod:`dtoolkit.geoaccessor`.
16
17 .. _geopandas#1952: https://github.com/geopandas/geopandas/pull/1952
18
19 Parameters
20 ----------
21 name : str
22 Name under which the accessor should be registered. A warning is issued
23 if this name conflicts with a preexisting attribute.
24
25 Returns
26 -------
27 callable
28 A class decorator.
29
30 See Also
31 --------
32 register_geoseries_accessor
33 register_geodataframe_accessor
34 dtoolkit.geoaccessor.register.register_geoseries_method
35 dtoolkit.geoaccessor.register.register_geodataframe_method
36
37 Notes
38 -----
39 When accessed, your accessor will be initialized with the geopandas object
40 the user is interacting with. So the signature must be::
41
42 def __init__(self, geopandas_object): # noqa: E999
43 ...
44
45 For consistency with geopandas methods, you should raise an
46 ``AttributeError`` if the data passed to your accessor has an incorrect
47 dtype.
48
49 >>> import geopandas as gpd
50 >>> gpd.GeoSeries().dt
51 Traceback (most recent call last):
52 ...
53 AttributeError: Can only use .dt accessor with datetimelike values
54
55 Examples
56 --------
57 In your library code::
58
59 from pygeos import count_coordinates, from_shapely
60
61 @register_geodataframe_accessor("coords")
62 @register_geoseries_accessor("coords")
63 class CoordinateAccessor:
64 def __init__(self, gpd_obj):
65 self._obj = gpd_obj
66 self.geometry = gpd_obj.geometry
67
68 @property
69 def count_coordinates(self):
70 # Counts the number of coordinate pairs in geometry
71
72 return self.geometry.apply(count_coordinates(from_shapely(x)))
73
74 Back in an interactive IPython session:
75
76 .. code-block:: ipython
77
78 In [1]: import geopandas as gpd
79
80 In [2]: s = gpd.GeoSeries.from_wkt(["POINT (0 0)", "POINT (1 1)", None])
81
82 In [3]: s
83 Out[3]:
84 0 POINT (0.00000 0.00000)
85 1 POINT (1.00000 1.00000)
86 2 None
87 dtype: geometry
88
89 In [4]: s.coords.count_coordinates
90 Out[4]:
91 0 1
92 1 1
93 2 0
94 dtype: int64
95
96 In [5]: d = s.to_frame("geometry")
97 Out[5]:
98 geometry
99 0 POINT (0.00000 0.00000)
100 1 POINT (1.00000 1.00000)
101 2 None
102
103 In [6]: d.coords.count_coordinates
104 Out[6]:
105 0 1
106 1 1
107 2 0
108 Name: geometry, dtype: int64
109 """
110
111 return _register_accessor(name, GeoSeries)
112
113
114 @doc(register_geoseries_accessor, klass=":class:`geopandas.GeoDataFrame`")
115 def register_geodataframe_accessor(name: str):
116
117 return _register_accessor(name, GeoDataFrame)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dtoolkit/geoaccessor/accessor.py b/dtoolkit/geoaccessor/accessor.py
--- a/dtoolkit/geoaccessor/accessor.py
+++ b/dtoolkit/geoaccessor/accessor.py
@@ -31,8 +31,8 @@
--------
register_geoseries_accessor
register_geodataframe_accessor
- dtoolkit.geoaccessor.register.register_geoseries_method
- dtoolkit.geoaccessor.register.register_geodataframe_method
+ dtoolkit.geoaccessor.register_geoseries_method
+ dtoolkit.geoaccessor.register_geodataframe_method
Notes
-----
diff --git a/dtoolkit/geoaccessor/register.py b/dtoolkit/geoaccessor/register.py
--- a/dtoolkit/geoaccessor/register.py
+++ b/dtoolkit/geoaccessor/register.py
@@ -15,8 +15,8 @@
See Also
--------
- dtoolkit.geoaccessor.accessor.register_geoseries_accessor
- dtoolkit.geoaccessor.accessor.register_geodataframe_accessor
+ dtoolkit.geoaccessor.register_geoseries_accessor
+ dtoolkit.geoaccessor.register_geodataframe_accessor
register_geoseries_method
register_geodataframe_method
| {"golden_diff": "diff --git a/dtoolkit/geoaccessor/accessor.py b/dtoolkit/geoaccessor/accessor.py\n--- a/dtoolkit/geoaccessor/accessor.py\n+++ b/dtoolkit/geoaccessor/accessor.py\n@@ -31,8 +31,8 @@\n --------\n register_geoseries_accessor\n register_geodataframe_accessor\n- dtoolkit.geoaccessor.register.register_geoseries_method\n- dtoolkit.geoaccessor.register.register_geodataframe_method\n+ dtoolkit.geoaccessor.register_geoseries_method\n+ dtoolkit.geoaccessor.register_geodataframe_method\n \n Notes\n -----\ndiff --git a/dtoolkit/geoaccessor/register.py b/dtoolkit/geoaccessor/register.py\n--- a/dtoolkit/geoaccessor/register.py\n+++ b/dtoolkit/geoaccessor/register.py\n@@ -15,8 +15,8 @@\n \n See Also\n --------\n- dtoolkit.geoaccessor.accessor.register_geoseries_accessor\n- dtoolkit.geoaccessor.accessor.register_geodataframe_accessor\n+ dtoolkit.geoaccessor.register_geoseries_accessor\n+ dtoolkit.geoaccessor.register_geodataframe_accessor\n register_geoseries_method\n register_geodataframe_method\n", "issue": "DOC: shorten package path\n\n", "before_files": [{"content": "from pandas.util._decorators import doc\n\nfrom dtoolkit.accessor.register import register_method_factory\nfrom dtoolkit.geoaccessor.accessor import register_geodataframe_accessor\nfrom dtoolkit.geoaccessor.accessor import register_geoseries_accessor\n\n\n@register_method_factory\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_method(method):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n See Also\n --------\n dtoolkit.geoaccessor.accessor.register_geoseries_accessor\n dtoolkit.geoaccessor.accessor.register_geodataframe_accessor\n register_geoseries_method\n register_geodataframe_method\n\n Examples\n --------\n In your library code::\n\n import geopandas as gpd\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_method\n @register_geoseries_method\n def counts(s: gpd.GeoSeries):\n # Counts the number of coordinate pairs in geometry\n\n func = lambda x: count_coordinates(from_shapely(x))\n return s.geometry.apply(func)\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.counts()\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.counts()\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n return register_geoseries_accessor(method)\n\n\n@register_method_factory\n@doc(register_geoseries_method, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_method(method):\n return register_geodataframe_accessor(method)\n", "path": "dtoolkit/geoaccessor/register.py"}, {"content": "from geopandas import GeoDataFrame\nfrom geopandas import GeoSeries\nfrom pandas.core.accessor import _register_accessor\nfrom pandas.util._decorators import doc\n\n\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_accessor(name: str):\n \"\"\"\n Register a custom accessor on {klass} objects.\n\n This is a temparatory solution to hook method into\n :class:`~geopandas.GeoSeries` or :class:`~geopandas.GeoDataFrame`.\n If `geopandas#1952`_ done, it would be removed from\n :mod:`dtoolkit.geoaccessor`.\n\n .. 
_geopandas#1952: https://github.com/geopandas/geopandas/pull/1952\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Returns\n -------\n callable\n A class decorator.\n\n See Also\n --------\n register_geoseries_accessor\n register_geodataframe_accessor\n dtoolkit.geoaccessor.register.register_geoseries_method\n dtoolkit.geoaccessor.register.register_geodataframe_method\n\n Notes\n -----\n When accessed, your accessor will be initialized with the geopandas object\n the user is interacting with. So the signature must be::\n\n def __init__(self, geopandas_object): # noqa: E999\n ...\n\n For consistency with geopandas methods, you should raise an\n ``AttributeError`` if the data passed to your accessor has an incorrect\n dtype.\n\n >>> import geopandas as gpd\n >>> gpd.GeoSeries().dt\n Traceback (most recent call last):\n ...\n AttributeError: Can only use .dt accessor with datetimelike values\n\n Examples\n --------\n In your library code::\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_accessor(\"coords\")\n @register_geoseries_accessor(\"coords\")\n class CoordinateAccessor:\n def __init__(self, gpd_obj):\n self._obj = gpd_obj\n self.geometry = gpd_obj.geometry\n\n @property\n def count_coordinates(self):\n # Counts the number of coordinate pairs in geometry\n\n return self.geometry.apply(count_coordinates(from_shapely(x)))\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.coords.count_coordinates\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.coords.count_coordinates\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n\n return _register_accessor(name, GeoSeries)\n\n\n@doc(register_geoseries_accessor, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_accessor(name: str):\n\n return _register_accessor(name, GeoDataFrame)\n", "path": "dtoolkit/geoaccessor/accessor.py"}], "after_files": [{"content": "from pandas.util._decorators import doc\n\nfrom dtoolkit.accessor.register import register_method_factory\nfrom dtoolkit.geoaccessor.accessor import register_geodataframe_accessor\nfrom dtoolkit.geoaccessor.accessor import register_geoseries_accessor\n\n\n@register_method_factory\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_method(method):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n See Also\n --------\n dtoolkit.geoaccessor.register_geoseries_accessor\n dtoolkit.geoaccessor.register_geodataframe_accessor\n register_geoseries_method\n register_geodataframe_method\n\n Examples\n --------\n In your library code::\n\n import geopandas as gpd\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_method\n @register_geoseries_method\n def counts(s: gpd.GeoSeries):\n # Counts the number of coordinate pairs in geometry\n\n func = lambda x: count_coordinates(from_shapely(x))\n return s.geometry.apply(func)\n\n Back in an interactive IPython session:\n\n .. 
code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.counts()\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.counts()\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n return register_geoseries_accessor(method)\n\n\n@register_method_factory\n@doc(register_geoseries_method, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_method(method):\n return register_geodataframe_accessor(method)\n", "path": "dtoolkit/geoaccessor/register.py"}, {"content": "from geopandas import GeoDataFrame\nfrom geopandas import GeoSeries\nfrom pandas.core.accessor import _register_accessor\nfrom pandas.util._decorators import doc\n\n\n@doc(klass=\":class:`geopandas.GeoSeries`\")\ndef register_geoseries_accessor(name: str):\n \"\"\"\n Register a custom accessor on {klass} objects.\n\n This is a temparatory solution to hook method into\n :class:`~geopandas.GeoSeries` or :class:`~geopandas.GeoDataFrame`.\n If `geopandas#1952`_ done, it would be removed from\n :mod:`dtoolkit.geoaccessor`.\n\n .. _geopandas#1952: https://github.com/geopandas/geopandas/pull/1952\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Returns\n -------\n callable\n A class decorator.\n\n See Also\n --------\n register_geoseries_accessor\n register_geodataframe_accessor\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n\n Notes\n -----\n When accessed, your accessor will be initialized with the geopandas object\n the user is interacting with. So the signature must be::\n\n def __init__(self, geopandas_object): # noqa: E999\n ...\n\n For consistency with geopandas methods, you should raise an\n ``AttributeError`` if the data passed to your accessor has an incorrect\n dtype.\n\n >>> import geopandas as gpd\n >>> gpd.GeoSeries().dt\n Traceback (most recent call last):\n ...\n AttributeError: Can only use .dt accessor with datetimelike values\n\n Examples\n --------\n In your library code::\n\n from pygeos import count_coordinates, from_shapely\n\n @register_geodataframe_accessor(\"coords\")\n @register_geoseries_accessor(\"coords\")\n class CoordinateAccessor:\n def __init__(self, gpd_obj):\n self._obj = gpd_obj\n self.geometry = gpd_obj.geometry\n\n @property\n def count_coordinates(self):\n # Counts the number of coordinate pairs in geometry\n\n return self.geometry.apply(count_coordinates(from_shapely(x)))\n\n Back in an interactive IPython session:\n\n .. 
code-block:: ipython\n\n In [1]: import geopandas as gpd\n\n In [2]: s = gpd.GeoSeries.from_wkt([\"POINT (0 0)\", \"POINT (1 1)\", None])\n\n In [3]: s\n Out[3]:\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n dtype: geometry\n\n In [4]: s.coords.count_coordinates\n Out[4]:\n 0 1\n 1 1\n 2 0\n dtype: int64\n\n In [5]: d = s.to_frame(\"geometry\")\n Out[5]:\n geometry\n 0 POINT (0.00000 0.00000)\n 1 POINT (1.00000 1.00000)\n 2 None\n\n In [6]: d.coords.count_coordinates\n Out[6]:\n 0 1\n 1 1\n 2 0\n Name: geometry, dtype: int64\n \"\"\"\n\n return _register_accessor(name, GeoSeries)\n\n\n@doc(register_geoseries_accessor, klass=\":class:`geopandas.GeoDataFrame`\")\ndef register_geodataframe_accessor(name: str):\n\n return _register_accessor(name, GeoDataFrame)\n", "path": "dtoolkit/geoaccessor/accessor.py"}]} | 2,182 | 284 |
gh_patches_debug_4229 | rasdani/github-patches | git_diff | twisted__twisted-11816 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
twisted.web.pages.errorPage docstring has a typo
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.
Should be:
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/web/pages.py`
Content:
```
1 # -*- test-case-name: twisted.web.test.test_pages -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Utility implementations of L{IResource}.
7 """
8
9 __all__ = (
10 "errorPage",
11 "notFound",
12 "forbidden",
13 )
14
15 from typing import cast
16
17 from twisted.web import http
18 from twisted.web.iweb import IRenderable, IRequest
19 from twisted.web.resource import IResource, Resource
20 from twisted.web.template import renderElement, tags
21
22
23 class _ErrorPage(Resource):
24 """
25 L{_ErrorPage} is a resource that responds to all requests with a particular
26 (parameterized) HTTP status code and an HTML body containing some
27 descriptive text. This is useful for rendering simple error pages.
28
29 @see: L{twisted.web.pages.errorPage}
30
31 @ivar _code: An integer HTTP status code which will be used for the
32 response.
33
34 @ivar _brief: A short string which will be included in the response body as
35 the page title.
36
37 @ivar _detail: A longer string which will be included in the response body.
38 """
39
40 def __init__(self, code: int, brief: str, detail: str) -> None:
41 super().__init__()
42 self._code: int = code
43 self._brief: str = brief
44 self._detail: str = detail
45
46 def render(self, request: IRequest) -> object:
47 """
48 Respond to all requests with the given HTTP status code and an HTML
49 document containing the explanatory strings.
50 """
51 request.setResponseCode(self._code)
52 request.setHeader(b"content-type", b"text/html; charset=utf-8")
53 return renderElement(
54 request,
55 # cast because the type annotations here seem off; Tag isn't an
56 # IRenderable but also probably should be? See
57 # https://github.com/twisted/twisted/issues/4982
58 cast(
59 IRenderable,
60 tags.html(
61 tags.head(tags.title(f"{self._code} - {self._brief}")),
62 tags.body(tags.h1(self._brief), tags.p(self._detail)),
63 ),
64 ),
65 )
66
67 def getChild(self, path: bytes, request: IRequest) -> Resource:
68 """
69 Handle all requests for which L{_ErrorPage} lacks a child by returning
70 this error page.
71
72 @param path: A path segment.
73
74 @param request: HTTP request
75 """
76 return self
77
78
79 def errorPage(code: int, brief: str, detail: str) -> IResource:
80 """
81 Build a resource that responds to all requests with a particular HTTP
82 status code and an HTML body containing some descriptive text. This is
83 useful for rendering simple error pages.
84
85 The resource dynamically handles all paths below it. Use
86 L{IResource.putChild()} override specific path.
87
88 @param code: An integer HTTP status code which will be used for the
89 response.
90
91 @param brief: A short string which will be included in the response
92 body as the page title.
93
94 @param detail: A longer string which will be included in the
95 response body.
96
97 @returns: An L{IResource}
98 """
99 return _ErrorPage(code, brief, detail)
100
101
102 def notFound(
103 brief: str = "No Such Resource",
104 message: str = "Sorry. No luck finding that resource.",
105 ) -> IResource:
106 """
107 Generate an L{IResource} with a 404 Not Found status code.
108
109 @see: L{twisted.web.pages.errorPage}
110
111 @param brief: A short string displayed as the page title.
112
113 @param brief: A longer string displayed in the page body.
114
115 @returns: An L{IResource}
116 """
117 return _ErrorPage(http.NOT_FOUND, brief, message)
118
119
120 def forbidden(
121 brief: str = "Forbidden Resource", message: str = "Sorry, resource is forbidden."
122 ) -> IResource:
123 """
124 Generate an L{IResource} with a 403 Forbidden status code.
125
126 @see: L{twisted.web.pages.errorPage}
127
128 @param brief: A short string displayed as the page title.
129
130 @param brief: A longer string displayed in the page body.
131
132 @returns: An L{IResource}
133 """
134 return _ErrorPage(http.FORBIDDEN, brief, message)
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py
--- a/src/twisted/web/pages.py
+++ b/src/twisted/web/pages.py
@@ -83,7 +83,7 @@
useful for rendering simple error pages.
The resource dynamically handles all paths below it. Use
- L{IResource.putChild()} override specific path.
+ L{IResource.putChild()} to override a specific path.
@param code: An integer HTTP status code which will be used for the
response.
| {"golden_diff": "diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py\n--- a/src/twisted/web/pages.py\n+++ b/src/twisted/web/pages.py\n@@ -83,7 +83,7 @@\n useful for rendering simple error pages.\n \n The resource dynamically handles all paths below it. Use\n- L{IResource.putChild()} override specific path.\n+ L{IResource.putChild()} to override a specific path.\n \n @param code: An integer HTTP status code which will be used for the\n response.\n", "issue": "twisted.web.pages.errorPage docstring has a typo\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.\r\n\r\nShould be:\r\n\r\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.\n", "before_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. 
Use\n L{IResource.putChild()} override specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? 
See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. Use\n L{IResource.putChild()} to override a specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}]} | 1,605 | 122 |
gh_patches_debug_40353 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1758 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only collect `db.statement` if there is sanitization
Spec https://github.com/open-telemetry/opentelemetry-specification/pull/3127
- [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg)
- [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg)
- [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi)
- [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch)
- [ ] [mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql)
- [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache)
- [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo)
- [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql)
- [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis)
- [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy)
- [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 sanitized_keys = (
16 "message",
17 "should",
18 "filter",
19 "query",
20 "queries",
21 "intervals",
22 "match",
23 )
24 sanitized_value = "?"
25
26
27 # pylint: disable=C0103
28 def _flatten_dict(d, parent_key=""):
29 items = []
30 for k, v in d.items():
31 new_key = parent_key + "." + k if parent_key else k
32 if isinstance(v, dict):
33 items.extend(_flatten_dict(v, new_key).items())
34 else:
35 items.append((new_key, v))
36 return dict(items)
37
38
39 def _unflatten_dict(d):
40 res = {}
41 for k, v in d.items():
42 keys = k.split(".")
43 d = res
44 for key in keys[:-1]:
45 if key not in d:
46 d[key] = {}
47 d = d[key]
48 d[keys[-1]] = v
49 return res
50
51
52 def sanitize_body(body) -> str:
53 flatten_body = _flatten_dict(body)
54
55 for key in flatten_body:
56 if key.endswith(sanitized_keys):
57 flatten_body[key] = sanitized_value
58
59 return str(_unflatten_dict(flatten_body))
60
```
Path: `instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45 The instrument() method accepts the following keyword args:
46 tracer_provider (TracerProvider) - an optional tracer provider
47 sanitize_query (bool) - an optional query sanitization flag
48 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
49 this function signature is:
50 def request_hook(span: Span, method: str, url: str, kwargs)
51
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 from .utils import sanitize_body
101
102 logger = getLogger(__name__)
103
104
105 # Values to add as tags from the actual
106 # payload returned by Elasticsearch, if any.
107 _ATTRIBUTES_FROM_RESULT = [
108 "found",
109 "timed_out",
110 "took",
111 ]
112
113 _DEFAULT_OP_NAME = "request"
114
115
116 class ElasticsearchInstrumentor(BaseInstrumentor):
117 """An instrumentor for elasticsearch
118 See `BaseInstrumentor`
119 """
120
121 def __init__(self, span_name_prefix=None):
122 if not span_name_prefix:
123 span_name_prefix = environ.get(
124 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
125 "Elasticsearch",
126 )
127 self._span_name_prefix = span_name_prefix.strip()
128 super().__init__()
129
130 def instrumentation_dependencies(self) -> Collection[str]:
131 return _instruments
132
133 def _instrument(self, **kwargs):
134 """
135 Instruments Elasticsearch module
136 """
137 tracer_provider = kwargs.get("tracer_provider")
138 tracer = get_tracer(__name__, __version__, tracer_provider)
139 request_hook = kwargs.get("request_hook")
140 response_hook = kwargs.get("response_hook")
141 sanitize_query = kwargs.get("sanitize_query", False)
142 _wrap(
143 elasticsearch,
144 "Transport.perform_request",
145 _wrap_perform_request(
146 tracer,
147 sanitize_query,
148 self._span_name_prefix,
149 request_hook,
150 response_hook,
151 ),
152 )
153
154 def _uninstrument(self, **kwargs):
155 unwrap(elasticsearch.Transport, "perform_request")
156
157
158 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
159
160 # search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
161 _regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
162
163
164 def _wrap_perform_request(
165 tracer,
166 sanitize_query,
167 span_name_prefix,
168 request_hook=None,
169 response_hook=None,
170 ):
171 # pylint: disable=R0912,R0914
172 def wrapper(wrapped, _, args, kwargs):
173 method = url = None
174 try:
175 method, url, *_ = args
176 except IndexError:
177 logger.warning(
178 "expected perform_request to receive two positional arguments. "
179 "Got %d",
180 len(args),
181 )
182
183 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
184
185 doc_id = None
186 search_target = None
187
188 if url:
189 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
190 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
191 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
192 match = _regex_doc_url.search(url)
193 if match is not None:
194 # Remove the full document ID from the URL
195 doc_span = match.span()
196 op_name = (
197 span_name_prefix
198 + url[: doc_span[0]]
199 + "/_doc/:id"
200 + url[doc_span[1] :]
201 )
202 # Put the document ID in attributes
203 doc_id = match.group(1)
204 match = _regex_search_url.search(url)
205 if match is not None:
206 op_name = span_name_prefix + "/<target>/_search"
207 search_target = match.group(1)
208
209 params = kwargs.get("params", {})
210 body = kwargs.get("body", None)
211
212 with tracer.start_as_current_span(
213 op_name,
214 kind=SpanKind.CLIENT,
215 ) as span:
216 if callable(request_hook):
217 request_hook(span, method, url, kwargs)
218
219 if span.is_recording():
220 attributes = {
221 SpanAttributes.DB_SYSTEM: "elasticsearch",
222 }
223 if url:
224 attributes["elasticsearch.url"] = url
225 if method:
226 attributes["elasticsearch.method"] = method
227 if body:
228 statement = str(body)
229 if sanitize_query:
230 statement = sanitize_body(body)
231 attributes[SpanAttributes.DB_STATEMENT] = statement
232 if params:
233 attributes["elasticsearch.params"] = str(params)
234 if doc_id:
235 attributes["elasticsearch.id"] = doc_id
236 if search_target:
237 attributes["elasticsearch.target"] = search_target
238 for key, value in attributes.items():
239 span.set_attribute(key, value)
240
241 rv = wrapped(*args, **kwargs)
242 if isinstance(rv, dict) and span.is_recording():
243 for member in _ATTRIBUTES_FROM_RESULT:
244 if member in rv:
245 span.set_attribute(
246 f"elasticsearch.{member}",
247 str(rv[member]),
248 )
249
250 if callable(response_hook):
251 response_hook(span, rv)
252 return rv
253
254 return wrapper
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -44,7 +44,6 @@
The instrument() method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
-sanitize_query (bool) - an optional query sanitization flag
request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
this function signature is:
def request_hook(span: Span, method: str, url: str, kwargs)
@@ -138,13 +137,11 @@
tracer = get_tracer(__name__, __version__, tracer_provider)
request_hook = kwargs.get("request_hook")
response_hook = kwargs.get("response_hook")
- sanitize_query = kwargs.get("sanitize_query", False)
_wrap(
elasticsearch,
"Transport.perform_request",
_wrap_perform_request(
tracer,
- sanitize_query,
self._span_name_prefix,
request_hook,
response_hook,
@@ -163,7 +160,6 @@
def _wrap_perform_request(
tracer,
- sanitize_query,
span_name_prefix,
request_hook=None,
response_hook=None,
@@ -225,10 +221,9 @@
if method:
attributes["elasticsearch.method"] = method
if body:
- statement = str(body)
- if sanitize_query:
- statement = sanitize_body(body)
- attributes[SpanAttributes.DB_STATEMENT] = statement
+ attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(
+ body
+ )
if params:
attributes["elasticsearch.params"] = str(params)
if doc_id:
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py
@@ -29,7 +29,8 @@
items = []
for k, v in d.items():
new_key = parent_key + "." + k if parent_key else k
- if isinstance(v, dict):
+ # recursive call _flatten_dict for a non-empty dict value
+ if isinstance(v, dict) and v:
items.extend(_flatten_dict(v, new_key).items())
else:
items.append((new_key, v))
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -44,7 +44,6 @@\n \n The instrument() method accepts the following keyword args:\n tracer_provider (TracerProvider) - an optional tracer provider\n-sanitize_query (bool) - an optional query sanitization flag\n request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\n@@ -138,13 +137,11 @@\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n- sanitize_query = kwargs.get(\"sanitize_query\", False)\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer,\n- sanitize_query,\n self._span_name_prefix,\n request_hook,\n response_hook,\n@@ -163,7 +160,6 @@\n \n def _wrap_perform_request(\n tracer,\n- sanitize_query,\n span_name_prefix,\n request_hook=None,\n response_hook=None,\n@@ -225,10 +221,9 @@\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n- statement = str(body)\n- if sanitize_query:\n- statement = sanitize_body(body)\n- attributes[SpanAttributes.DB_STATEMENT] = statement\n+ attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(\n+ body\n+ )\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\ndiff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py\n@@ -29,7 +29,8 @@\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n- if isinstance(v, dict):\n+ # recursive call _flatten_dict for a non-empty dict value\n+ if isinstance(v, dict) and v:\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n", "issue": "Only collect `db.statement` if there is sanitization\nSpec https://github.com/open-telemetry/opentelemetry-specification/pull/3127\r\n- [ ] [aiopg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-aiopg)\r\n- [ ] [asyncpg](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-asyncpg)\r\n- [ ] [dbapi](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-dbapi)\r\n- [ ] [elasticsearch](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-elasticsearch)\r\n- [ ] 
[mysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-mysql)\r\n- [ ] [pymemcache](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymemcache)\r\n- [ ] [pymongo](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymongo)\r\n- [ ] [pymysql](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-pymysql)\r\n- [ ] [redis](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-redis)\r\n- [ ] [sqlalchemy](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlalchemy)\r\n- [ ] [sqlite3](https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-sqlite3)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsanitized_keys = (\n \"message\",\n \"should\",\n \"filter\",\n \"query\",\n \"queries\",\n \"intervals\",\n \"match\",\n)\nsanitized_value = \"?\"\n\n\n# pylint: disable=C0103\ndef _flatten_dict(d, parent_key=\"\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n if isinstance(v, dict):\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef _unflatten_dict(d):\n res = {}\n for k, v in d.items():\n keys = k.split(\".\")\n d = res\n for key in keys[:-1]:\n if key not in d:\n d[key] = {}\n d = d[key]\n d[keys[-1]] = v\n return res\n\n\ndef sanitize_body(body) -> str:\n flatten_body = _flatten_dict(body)\n\n for key in flatten_body:\n if key.endswith(sanitized_keys):\n flatten_body[key] = sanitized_value\n\n return str(_unflatten_dict(flatten_body))\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\nThe instrument() method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nsanitize_query (bool) - an optional query sanitization flag\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\nthis function signature is:\ndef request_hook(span: Span, method: str, url: str, kwargs)\n\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\nthis function signature is:\ndef response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nfrom .utils import sanitize_body\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n 
\"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n sanitize_query = kwargs.get(\"sanitize_query\", False)\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer,\n sanitize_query,\n self._span_name_prefix,\n request_hook,\n response_hook,\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer,\n sanitize_query,\n span_name_prefix,\n request_hook=None,\n response_hook=None,\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. 
Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n statement = str(body)\n if sanitize_query:\n statement = sanitize_body(body)\n attributes[SpanAttributes.DB_STATEMENT] = statement\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsanitized_keys = (\n \"message\",\n \"should\",\n \"filter\",\n \"query\",\n \"queries\",\n \"intervals\",\n \"match\",\n)\nsanitized_value = \"?\"\n\n\n# pylint: disable=C0103\ndef _flatten_dict(d, parent_key=\"\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + \".\" + k if parent_key else k\n # recursive call _flatten_dict for a non-empty dict value\n if isinstance(v, dict) and v:\n items.extend(_flatten_dict(v, new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef _unflatten_dict(d):\n res = {}\n for k, v in d.items():\n keys = k.split(\".\")\n d = res\n for key in keys[:-1]:\n if key not in d:\n d[key] = {}\n d = d[key]\n d[keys[-1]] = v\n return res\n\n\ndef sanitize_body(body) -> str:\n flatten_body = _flatten_dict(body)\n\n for key in flatten_body:\n if key.endswith(sanitized_keys):\n flatten_body[key] = sanitized_value\n\n return str(_unflatten_dict(flatten_body))\n", "path": 
"instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/utils.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\nThe instrument() method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\nthis function signature is:\ndef request_hook(span: Span, method: str, url: str, kwargs)\n\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\nthis function signature is:\ndef response_hook(span: Span, response: dict)\n\nfor example:\n\n.. 
code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nfrom .utils import sanitize_body\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer,\n self._span_name_prefix,\n request_hook,\n response_hook,\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer,\n span_name_prefix,\n request_hook=None,\n response_hook=None,\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. 
\"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = sanitize_body(\n body\n )\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]} | 3,713 | 662 |
gh_patches_debug_12855 | rasdani/github-patches | git_diff | fossasia__open-event-server-6858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Server crashing on order PATCH
```
KeyError: 'mytickets_url'
(16 additional frame(s) were not displayed)
...
File "flask_rest_jsonapi/decorators.py", line 56, in wrapper
return func(*args, **kwargs)
File "flask_rest_jsonapi/resource.py", line 310, in patch
self._data_layer.update_object(obj, data, kwargs)
File "flask_rest_jsonapi/data_layers/alchemy.py", line 144, in update_object
self.after_update_object(obj, data, view_kwargs)
File "app/api/orders.py", line 601, in after_update_object
attachments=[ticket_path, invoice_path],
File "app/api/helpers/mail.py", line 354, in send_email_to_attendees
event_name=order.event.name,
```
Rolled back the release
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/system_mails.py`
Content:
```
1 """
2 All the System mails
3 Register a mail here before using it
4 """
5 from app.models.mail import (
6 AFTER_EVENT,
7 EVENT_EXPORT_FAIL,
8 EVENT_EXPORTED,
9 EVENT_IMPORT_FAIL,
10 EVENT_IMPORTED,
11 EVENT_PUBLISH,
12 EVENT_ROLE,
13 INVITE_PAPERS,
14 MAIL_TO_EXPIRED_ORDERS,
15 MONTHLY_PAYMENT_EMAIL,
16 MONTHLY_PAYMENT_FOLLOWUP_EMAIL,
17 NEW_SESSION,
18 NEXT_EVENT,
19 PASSWORD_CHANGE,
20 PASSWORD_RESET,
21 PASSWORD_RESET_AND_VERIFY,
22 SESSION_ACCEPT_REJECT,
23 SESSION_SCHEDULE,
24 TEST_MAIL,
25 TICKET_CANCELLED,
26 TICKET_PURCHASED,
27 TICKET_PURCHASED_ATTENDEE,
28 TICKET_PURCHASED_ORGANIZER,
29 USER_CHANGE_EMAIL,
30 USER_CONFIRM,
31 USER_EVENT_ROLE,
32 USER_REGISTER,
33 USER_REGISTER_WITH_PASSWORD,
34 )
35
36 MAILS = {
37 EVENT_PUBLISH: {
38 'recipient': 'Owner, Organizer, Speaker',
39 'subject': u'{event_name} is Live',
40 'message': (
41 u"Hi {email}<br/>"
42 + u"Event, {event_name}, is up and running and ready for action. Go ahead and check it out."
43 + u"<br/> Visit this link to view it: {link}"
44 ),
45 },
46 INVITE_PAPERS: {
47 'recipient': 'Speaker',
48 'subject': u'Invitation to Submit Papers for {event_name}',
49 'message': (
50 u"Hi {email}<br/>"
51 + u"You are invited to submit papers for event: {event_name}"
52 + u"<br/> Visit this link to fill up details: {link}"
53 ),
54 },
55 SESSION_ACCEPT_REJECT: {
56 'recipient': 'Speaker',
57 'subject': u'Session {session_name} has been {acceptance}',
58 'message': (
59 u"Hi {email},<br/>"
60 + u"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. "
61 + u"<br/> Visit this link to view the session: {link}"
62 ),
63 },
64 SESSION_SCHEDULE: {
65 'recipient': 'Owner, Organizer, Speaker',
66 'subject': u'Schedule for Session {session_name} has been changed',
67 'message': (
68 u"Hi {email},<br/>"
69 + u"The schedule for session <strong>{session_name}</strong> has been changed. "
70 + u"<br/> Visit this link to view the session: {link}"
71 ),
72 },
73 NEXT_EVENT: {
74 'recipient': 'Owner, Organizer, Speaker',
75 'subject': u'Event {event_name} is coming soon',
76 'message': (
77 u"Hi {email},<br/>"
78 + u"Here are the upcoming events: {up_coming_events} .Get ready!! "
79 + u"<br/> Visit this link to view the event: {link}"
80 ),
81 },
82 AFTER_EVENT: {
83 'recipient': 'Owner, Organizer, Speaker',
84 'subject': u'Event {event_name} is over',
85 'message': (
86 u"Hi {email},<br/>"
87 + u"Thank You for participating in our event. We hope you enjoyed it. "
88 u"Please check out other upcoming events around you on {url} <br />"
89 ),
90 'sent_at': '1 day after the event',
91 },
92 NEW_SESSION: {
93 'recipient': 'Owner, Organizer',
94 'subject': u'New session proposal for {event_name}',
95 'message': (
96 u"Hi {email},<br/>"
97 + u"The event <strong>{event_name}</strong> has received a new session proposal. "
98 + u"<br/> Visit this link to view the session: <a href='{link}' target='_blank'>{link}</a>"
99 ),
100 },
101 USER_REGISTER: {
102 'recipient': 'User',
103 'subject': u'Account Created on {app_name}',
104 'message': (
105 u"Your Account Has Been Created! Congratulations!"
106 + u"<br/> Your login: {email}"
107 ),
108 },
109 USER_REGISTER_WITH_PASSWORD: {
110 'recipient': 'User',
111 'subject': u'Welcome to {app_name}',
112 'message': (
113 u"Your Account Has Been Created! Congratulations!"
114 + u"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>"
115 ),
116 },
117 USER_CONFIRM: {
118 'recipient': 'User',
119 'subject': u'Email Confirmation to Create Account for Open-Event',
120 'message': (
121 u"Hi {email},<br/>"
122 + u"Please visit this link to confirm your email: <a href='{link}' target='_blank'>{link}</a>"
123 ),
124 },
125 USER_CHANGE_EMAIL: {
126 'recipient': 'User',
127 'subject': u'Your email has been already changed',
128 'message': (
129 u"Hi {email},<br/>"
130 + u"Your email has been already changed from {email} to {new_email}. You should verify your new email"
131 ),
132 },
133 PASSWORD_RESET: {
134 'recipient': 'User',
135 'subject': u'{app_name}: Password Reset',
136 'message': (
137 u"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>"
138 + " Or paste this token in your {app_name} App: {token} "
139 ),
140 },
141 PASSWORD_RESET_AND_VERIFY: {
142 'recipient': 'User',
143 'subject': u'{app_name}: Reset your password and verify your account',
144 'message': (
145 u"Please use the following link to reset your password and verify your account."
146 + "<br> <a href='{link}' target='_blank'>{link}</a>"
147 ),
148 },
149 PASSWORD_CHANGE: {
150 'recipient': 'User',
151 'subject': u'{app_name}: Password Change',
152 'message': (
153 u"Your password has been successfully changed. Please login with your new password."
154 ),
155 },
156 EVENT_ROLE: {
157 'recipient': 'User',
158 'subject': u'Invitation to be {role} at {event}',
159 'message': (
160 u"Hello {email},<br><br>"
161 + u"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>"
162 + u"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>."
163 ),
164 },
165 USER_EVENT_ROLE: {
166 'recipient': 'User',
167 'subject': u'Invitation to be {role} at {event}',
168 'message': (
169 u"Hello {email},<br><br>"
170 + u"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>"
171 + u"To accept the role please go to the following link: <a href='{link}' target='_blank'>Link</a>."
172 ),
173 },
174 TICKET_PURCHASED: {
175 'recipient': 'User',
176 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',
177 'message': (
178 u"Hi, this is a confirmation mail of your tickets for the event {event_name}"
179 u"<br/>Your order has been processed successfully."
180 + u"<br/> You can find your Tickets and Order Invoice attached to this mail."
181 u"<br><br><em>Looking forward to seeing you at the event."
182 u"<br/>Login to manage your orders at {frontend_url} </em>"
183 ),
184 },
185 TICKET_PURCHASED_ATTENDEE: {
186 'recipient': 'Attendee',
187 'subject': u'Your tickets for {event_name} ({invoice_id}) ',
188 'message': (
189 u"Hi, this is a confirmation mail of your tickets for the event {event_name}"
190 u"<br/>Your order has been processed successfully."
191 + u"<br/> Your tickets & invoice have been enclosed."
192 u"<br><br>You can also download your tickets in <b>My Tickets</b> section."
193 u"<br/>Login to manage the orders at <a href='{mytickets_url}' target='_blank'>{mytickets_url}</a> </em>"
194 u"<br><br><em>Looking forward to seeing you at the event."
195 ),
196 },
197 TICKET_PURCHASED_ORGANIZER: {
198 'recipient': 'Owner, Organizer, Coorganizer',
199 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',
200 'message': (
201 u"Hi, {buyer_email} just bought tickets for the event {event_name}"
202 u"<br/>The order has been processed successfully."
203 + u"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice."
204 u"<br/>Login to manage the orders at <a href='{frontend_url}' target='_blank'>{frontend_url}</a> </em>"
205 ),
206 },
207 TICKET_CANCELLED: {
208 'recipient': 'User',
209 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',
210 'message': (
211 u"Hello,"
212 u"<br/>your order for {event_name} has been cancelled by the organizer."
213 u"<br/>Please contact the organizer for more info." + u"{cancel_msg}"
214 u"<br/>To manage orders please login to <a href='{frontend_url}' target='_blank'>{frontend_url}</a>"
215 u"and visit \"My Tickets\"."
216 u"<br/>Best regards,"
217 u"<br/>{app_name} Team"
218 ),
219 },
220 EVENT_EXPORTED: {
221 'recipient': 'User',
222 'subject': u'Event {event_name} has been exported',
223 'message': (
224 u"Click on the following link to download the event."
225 + u"<br> <a href='{download_url}'>Download</a>"
226 ),
227 },
228 EVENT_EXPORT_FAIL: {
229 'recipient': 'User',
230 'subject': u'Export of event {event_name} failed',
231 'message': (u"The error was as follows - <br>" + u"<pre>{error_text}</pre>"),
232 },
233 MAIL_TO_EXPIRED_ORDERS: {
234 'recipient': 'User',
235 'subject': u'Tickets for {event_name} are still available ',
236 'message': (
237 u"This is just a gentle reminder that the payment for your order {invoice_id} is still left."
238 + u"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to "
239 u"purchase your ticket for this event."
240 u"<br><br><em>Looking forward to seeing you at the event.</em>"
241 ),
242 },
243 MONTHLY_PAYMENT_EMAIL: {
244 'recipient': 'Owner, Organizer',
245 'subject': u'{date} - Monthly service fee invoice for {event_name}',
246 'message': (
247 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}."
248 + u"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to "
249 u"view your invoice and complete the payment."
250 u"<br><br><em>Thank you for using {app_name}.</em>"
251 ),
252 'sent_at': '1st day of the month',
253 },
254 MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {
255 'recipient': 'Owner, Organizer',
256 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',
257 'message': (
258 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}."
259 + u"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to "
260 u"view your invoice and complete the payment to prevent loss of functionality."
261 u"<br><br><em>Thank you for using {app_name}.</em>"
262 ),
263 'sent_at': '15th day of the month',
264 },
265 EVENT_IMPORTED: {
266 'recipient': 'User',
267 'subject': u'Event {event_name} has been imported',
268 'message': (
269 u"Click on the following link to manage your event"
270 + u"<br> <a href='{event_url}'>Link</a>"
271 ),
272 },
273 EVENT_IMPORT_FAIL: {
274 'recipient': 'User',
275 'subject': u'Import of event failed',
276 'message': (u"The error was as follows - <br>" + u"<pre>{error_text}</pre>"),
277 },
278 TEST_MAIL: {
279 'recipient': 'User',
280 'subject': u'Test Mail Subject',
281 'message': (u"This is a <strong> Test </strong> E-mail."),
282 },
283 }
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py
--- a/app/api/helpers/system_mails.py
+++ b/app/api/helpers/system_mails.py
@@ -190,7 +190,7 @@
u"<br/>Your order has been processed successfully."
+ u"<br/> Your tickets & invoice have been enclosed."
u"<br><br>You can also download your tickets in <b>My Tickets</b> section."
- u"<br/>Login to manage the orders at <a href='{mytickets_url}' target='_blank'>{mytickets_url}</a> </em>"
+ u"<br/>Login to manage the orders at <a href='{my_tickets_url}' target='_blank'>{my_tickets_url}</a> </em>"
u"<br><br><em>Looking forward to seeing you at the event."
),
},
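An illustrative aside on the patch above: it only renames a placeholder, but that is enough to stop the reported crash, because `str.format()` raises `KeyError` for any placeholder that has no matching keyword argument, and the error therefore only surfaces when the mail is actually rendered. A minimal sketch, independent of the project's real mail-sending code (template text and keyword names are simplified stand-ins):

```python
# Minimal sketch of the failure mode: the template names a placeholder that the
# caller never supplies, so rendering raises KeyError('mytickets_url').
template = (
    "Login to manage the orders at "
    "<a href='{mytickets_url}' target='_blank'>{mytickets_url}</a>"
)
kwargs = {"email": "attendee@example.com", "my_tickets_url": "https://example.com/my-tickets"}

try:
    template.format(**kwargs)
except KeyError as exc:
    print(f"KeyError: {exc}")  # KeyError: 'mytickets_url'

# With the placeholder renamed to match the supplied keyword, rendering succeeds.
fixed = template.replace("{mytickets_url}", "{my_tickets_url}")
print(fixed.format(**kwargs))
```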
| {"golden_diff": "diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py\n--- a/app/api/helpers/system_mails.py\n+++ b/app/api/helpers/system_mails.py\n@@ -190,7 +190,7 @@\n u\"<br/>Your order has been processed successfully.\"\n + u\"<br/> Your tickets & invoice have been enclosed.\"\n u\"<br><br>You can also download your tickets in <b>My Tickets</b> section.\"\n- u\"<br/>Login to manage the orders at <a href='{mytickets_url}' target='_blank'>{mytickets_url}</a> </em>\"\n+ u\"<br/>Login to manage the orders at <a href='{my_tickets_url}' target='_blank'>{my_tickets_url}</a> </em>\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n ),\n },\n", "issue": "Server crashing on order PATCH\n```\r\nKeyError: 'mytickets_url'\r\n(16 additional frame(s) were not displayed)\r\n...\r\n File \"flask_rest_jsonapi/decorators.py\", line 56, in wrapper\r\n return func(*args, **kwargs)\r\n File \"flask_rest_jsonapi/resource.py\", line 310, in patch\r\n self._data_layer.update_object(obj, data, kwargs)\r\n File \"flask_rest_jsonapi/data_layers/alchemy.py\", line 144, in update_object\r\n self.after_update_object(obj, data, view_kwargs)\r\n File \"app/api/orders.py\", line 601, in after_update_object\r\n attachments=[ticket_path, invoice_path],\r\n File \"app/api/helpers/mail.py\", line 354, in send_email_to_attendees\r\n event_name=order.event.name,\r\n```\r\n\r\nRolled back the release\n", "before_files": [{"content": "\"\"\"\nAll the System mails\nRegister a mail here before using it\n\"\"\"\nfrom app.models.mail import (\n AFTER_EVENT,\n EVENT_EXPORT_FAIL,\n EVENT_EXPORTED,\n EVENT_IMPORT_FAIL,\n EVENT_IMPORTED,\n EVENT_PUBLISH,\n EVENT_ROLE,\n INVITE_PAPERS,\n MAIL_TO_EXPIRED_ORDERS,\n MONTHLY_PAYMENT_EMAIL,\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL,\n NEW_SESSION,\n NEXT_EVENT,\n PASSWORD_CHANGE,\n PASSWORD_RESET,\n PASSWORD_RESET_AND_VERIFY,\n SESSION_ACCEPT_REJECT,\n SESSION_SCHEDULE,\n TEST_MAIL,\n TICKET_CANCELLED,\n TICKET_PURCHASED,\n TICKET_PURCHASED_ATTENDEE,\n TICKET_PURCHASED_ORGANIZER,\n USER_CHANGE_EMAIL,\n USER_CONFIRM,\n USER_EVENT_ROLE,\n USER_REGISTER,\n USER_REGISTER_WITH_PASSWORD,\n)\n\nMAILS = {\n EVENT_PUBLISH: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'{event_name} is Live',\n 'message': (\n u\"Hi {email}<br/>\"\n + u\"Event, {event_name}, is up and running and ready for action. Go ahead and check it out.\"\n + u\"<br/> Visit this link to view it: {link}\"\n ),\n },\n INVITE_PAPERS: {\n 'recipient': 'Speaker',\n 'subject': u'Invitation to Submit Papers for {event_name}',\n 'message': (\n u\"Hi {email}<br/>\"\n + u\"You are invited to submit papers for event: {event_name}\"\n + u\"<br/> Visit this link to fill up details: {link}\"\n ),\n },\n SESSION_ACCEPT_REJECT: {\n 'recipient': 'Speaker',\n 'subject': u'Session {session_name} has been {acceptance}',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. \"\n + u\"<br/> Visit this link to view the session: {link}\"\n ),\n },\n SESSION_SCHEDULE: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Schedule for Session {session_name} has been changed',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The schedule for session <strong>{session_name}</strong> has been changed. 
\"\n + u\"<br/> Visit this link to view the session: {link}\"\n ),\n },\n NEXT_EVENT: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Event {event_name} is coming soon',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Here are the upcoming events: {up_coming_events} .Get ready!! \"\n + u\"<br/> Visit this link to view the event: {link}\"\n ),\n },\n AFTER_EVENT: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Event {event_name} is over',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Thank You for participating in our event. We hope you enjoyed it. \"\n u\"Please check out other upcoming events around you on {url} <br />\"\n ),\n 'sent_at': '1 day after the event',\n },\n NEW_SESSION: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'New session proposal for {event_name}',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The event <strong>{event_name}</strong> has received a new session proposal. \"\n + u\"<br/> Visit this link to view the session: <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n USER_REGISTER: {\n 'recipient': 'User',\n 'subject': u'Account Created on {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\"\n + u\"<br/> Your login: {email}\"\n ),\n },\n USER_REGISTER_WITH_PASSWORD: {\n 'recipient': 'User',\n 'subject': u'Welcome to {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\"\n + u\"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>\"\n ),\n },\n USER_CONFIRM: {\n 'recipient': 'User',\n 'subject': u'Email Confirmation to Create Account for Open-Event',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Please visit this link to confirm your email: <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n USER_CHANGE_EMAIL: {\n 'recipient': 'User',\n 'subject': u'Your email has been already changed',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Your email has been already changed from {email} to {new_email}. You should verify your new email\"\n ),\n },\n PASSWORD_RESET: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n u\"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>\"\n + \" Or paste this token in your {app_name} App: {token} \"\n ),\n },\n PASSWORD_RESET_AND_VERIFY: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Reset your password and verify your account',\n 'message': (\n u\"Please use the following link to reset your password and verify your account.\"\n + \"<br> <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n PASSWORD_CHANGE: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Change',\n 'message': (\n u\"Your password has been successfully changed. 
Please login with your new password.\"\n ),\n },\n EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\"\n + u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\"\n + u\"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>.\"\n ),\n },\n USER_EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\"\n + u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\"\n + u\"To accept the role please go to the following link: <a href='{link}' target='_blank'>Link</a>.\"\n ),\n },\n TICKET_PURCHASED: {\n 'recipient': 'User',\n 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\"\n + u\"<br/> You can find your Tickets and Order Invoice attached to this mail.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n u\"<br/>Login to manage your orders at {frontend_url} </em>\"\n ),\n },\n TICKET_PURCHASED_ATTENDEE: {\n 'recipient': 'Attendee',\n 'subject': u'Your tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\"\n + u\"<br/> Your tickets & invoice have been enclosed.\"\n u\"<br><br>You can also download your tickets in <b>My Tickets</b> section.\"\n u\"<br/>Login to manage the orders at <a href='{mytickets_url}' target='_blank'>{mytickets_url}</a> </em>\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n ),\n },\n TICKET_PURCHASED_ORGANIZER: {\n 'recipient': 'Owner, Organizer, Coorganizer',\n 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',\n 'message': (\n u\"Hi, {buyer_email} just bought tickets for the event {event_name}\"\n u\"<br/>The order has been processed successfully.\"\n + u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at <a href='{frontend_url}' target='_blank'>{frontend_url}</a> </em>\"\n ),\n },\n TICKET_CANCELLED: {\n 'recipient': 'User',\n 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',\n 'message': (\n u\"Hello,\"\n u\"<br/>your order for {event_name} has been cancelled by the organizer.\"\n u\"<br/>Please contact the organizer for more info.\" + u\"{cancel_msg}\"\n u\"<br/>To manage orders please login to <a href='{frontend_url}' target='_blank'>{frontend_url}</a>\"\n u\"and visit \\\"My Tickets\\\".\"\n u\"<br/>Best regards,\"\n u\"<br/>{app_name} Team\"\n ),\n },\n EVENT_EXPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been exported',\n 'message': (\n u\"Click on the following link to download the event.\"\n + u\"<br> <a href='{download_url}'>Download</a>\"\n ),\n },\n EVENT_EXPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Export of event {event_name} failed',\n 'message': (u\"The error was as follows - <br>\" + u\"<pre>{error_text}</pre>\"),\n },\n MAIL_TO_EXPIRED_ORDERS: {\n 'recipient': 'User',\n 'subject': u'Tickets for {event_name} are still available ',\n 'message': (\n u\"This is just a gentle reminder that the payment for your order {invoice_id} is still left.\"\n + u\"<br/> The tickets for this event are 
still available. <a href='{order_url}'>Click here</a> to \"\n u\"purchase your ticket for this event.\"\n u\"<br><br><em>Looking forward to seeing you at the event.</em>\"\n ),\n },\n MONTHLY_PAYMENT_EMAIL: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'{date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\"\n + u\"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '1st day of the month',\n },\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\"\n + u\"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment to prevent loss of functionality.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '15th day of the month',\n },\n EVENT_IMPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been imported',\n 'message': (\n u\"Click on the following link to manage your event\"\n + u\"<br> <a href='{event_url}'>Link</a>\"\n ),\n },\n EVENT_IMPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Import of event failed',\n 'message': (u\"The error was as follows - <br>\" + u\"<pre>{error_text}</pre>\"),\n },\n TEST_MAIL: {\n 'recipient': 'User',\n 'subject': u'Test Mail Subject',\n 'message': (u\"This is a <strong> Test </strong> E-mail.\"),\n },\n}\n", "path": "app/api/helpers/system_mails.py"}], "after_files": [{"content": "\"\"\"\nAll the System mails\nRegister a mail here before using it\n\"\"\"\nfrom app.models.mail import (\n AFTER_EVENT,\n EVENT_EXPORT_FAIL,\n EVENT_EXPORTED,\n EVENT_IMPORT_FAIL,\n EVENT_IMPORTED,\n EVENT_PUBLISH,\n EVENT_ROLE,\n INVITE_PAPERS,\n MAIL_TO_EXPIRED_ORDERS,\n MONTHLY_PAYMENT_EMAIL,\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL,\n NEW_SESSION,\n NEXT_EVENT,\n PASSWORD_CHANGE,\n PASSWORD_RESET,\n PASSWORD_RESET_AND_VERIFY,\n SESSION_ACCEPT_REJECT,\n SESSION_SCHEDULE,\n TEST_MAIL,\n TICKET_CANCELLED,\n TICKET_PURCHASED,\n TICKET_PURCHASED_ATTENDEE,\n TICKET_PURCHASED_ORGANIZER,\n USER_CHANGE_EMAIL,\n USER_CONFIRM,\n USER_EVENT_ROLE,\n USER_REGISTER,\n USER_REGISTER_WITH_PASSWORD,\n)\n\nMAILS = {\n EVENT_PUBLISH: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'{event_name} is Live',\n 'message': (\n u\"Hi {email}<br/>\"\n + u\"Event, {event_name}, is up and running and ready for action. Go ahead and check it out.\"\n + u\"<br/> Visit this link to view it: {link}\"\n ),\n },\n INVITE_PAPERS: {\n 'recipient': 'Speaker',\n 'subject': u'Invitation to Submit Papers for {event_name}',\n 'message': (\n u\"Hi {email}<br/>\"\n + u\"You are invited to submit papers for event: {event_name}\"\n + u\"<br/> Visit this link to fill up details: {link}\"\n ),\n },\n SESSION_ACCEPT_REJECT: {\n 'recipient': 'Speaker',\n 'subject': u'Session {session_name} has been {acceptance}',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. 
\"\n + u\"<br/> Visit this link to view the session: {link}\"\n ),\n },\n SESSION_SCHEDULE: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Schedule for Session {session_name} has been changed',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The schedule for session <strong>{session_name}</strong> has been changed. \"\n + u\"<br/> Visit this link to view the session: {link}\"\n ),\n },\n NEXT_EVENT: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Event {event_name} is coming soon',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Here are the upcoming events: {up_coming_events} .Get ready!! \"\n + u\"<br/> Visit this link to view the event: {link}\"\n ),\n },\n AFTER_EVENT: {\n 'recipient': 'Owner, Organizer, Speaker',\n 'subject': u'Event {event_name} is over',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Thank You for participating in our event. We hope you enjoyed it. \"\n u\"Please check out other upcoming events around you on {url} <br />\"\n ),\n 'sent_at': '1 day after the event',\n },\n NEW_SESSION: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'New session proposal for {event_name}',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"The event <strong>{event_name}</strong> has received a new session proposal. \"\n + u\"<br/> Visit this link to view the session: <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n USER_REGISTER: {\n 'recipient': 'User',\n 'subject': u'Account Created on {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\"\n + u\"<br/> Your login: {email}\"\n ),\n },\n USER_REGISTER_WITH_PASSWORD: {\n 'recipient': 'User',\n 'subject': u'Welcome to {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\"\n + u\"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>\"\n ),\n },\n USER_CONFIRM: {\n 'recipient': 'User',\n 'subject': u'Email Confirmation to Create Account for Open-Event',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Please visit this link to confirm your email: <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n USER_CHANGE_EMAIL: {\n 'recipient': 'User',\n 'subject': u'Your email has been already changed',\n 'message': (\n u\"Hi {email},<br/>\"\n + u\"Your email has been already changed from {email} to {new_email}. You should verify your new email\"\n ),\n },\n PASSWORD_RESET: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n u\"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>\"\n + \" Or paste this token in your {app_name} App: {token} \"\n ),\n },\n PASSWORD_RESET_AND_VERIFY: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Reset your password and verify your account',\n 'message': (\n u\"Please use the following link to reset your password and verify your account.\"\n + \"<br> <a href='{link}' target='_blank'>{link}</a>\"\n ),\n },\n PASSWORD_CHANGE: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Change',\n 'message': (\n u\"Your password has been successfully changed. 
Please login with your new password.\"\n ),\n },\n EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\"\n + u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\"\n + u\"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>.\"\n ),\n },\n USER_EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\"\n + u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\"\n + u\"To accept the role please go to the following link: <a href='{link}' target='_blank'>Link</a>.\"\n ),\n },\n TICKET_PURCHASED: {\n 'recipient': 'User',\n 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\"\n + u\"<br/> You can find your Tickets and Order Invoice attached to this mail.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n u\"<br/>Login to manage your orders at {frontend_url} </em>\"\n ),\n },\n TICKET_PURCHASED_ATTENDEE: {\n 'recipient': 'Attendee',\n 'subject': u'Your tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\"\n + u\"<br/> Your tickets & invoice have been enclosed.\"\n u\"<br><br>You can also download your tickets in <b>My Tickets</b> section.\"\n u\"<br/>Login to manage the orders at <a href='{my_tickets_url}' target='_blank'>{my_tickets_url}</a> </em>\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n ),\n },\n TICKET_PURCHASED_ORGANIZER: {\n 'recipient': 'Owner, Organizer, Coorganizer',\n 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',\n 'message': (\n u\"Hi, {buyer_email} just bought tickets for the event {event_name}\"\n u\"<br/>The order has been processed successfully.\"\n + u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at <a href='{frontend_url}' target='_blank'>{frontend_url}</a> </em>\"\n ),\n },\n TICKET_CANCELLED: {\n 'recipient': 'User',\n 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',\n 'message': (\n u\"Hello,\"\n u\"<br/>your order for {event_name} has been cancelled by the organizer.\"\n u\"<br/>Please contact the organizer for more info.\" + u\"{cancel_msg}\"\n u\"<br/>To manage orders please login to <a href='{frontend_url}' target='_blank'>{frontend_url}</a>\"\n u\"and visit \\\"My Tickets\\\".\"\n u\"<br/>Best regards,\"\n u\"<br/>{app_name} Team\"\n ),\n },\n EVENT_EXPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been exported',\n 'message': (\n u\"Click on the following link to download the event.\"\n + u\"<br> <a href='{download_url}'>Download</a>\"\n ),\n },\n EVENT_EXPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Export of event {event_name} failed',\n 'message': (u\"The error was as follows - <br>\" + u\"<pre>{error_text}</pre>\"),\n },\n MAIL_TO_EXPIRED_ORDERS: {\n 'recipient': 'User',\n 'subject': u'Tickets for {event_name} are still available ',\n 'message': (\n u\"This is just a gentle reminder that the payment for your order {invoice_id} is still left.\"\n + u\"<br/> The tickets for this event 
are still available. <a href='{order_url}'>Click here</a> to \"\n u\"purchase your ticket for this event.\"\n u\"<br><br><em>Looking forward to seeing you at the event.</em>\"\n ),\n },\n MONTHLY_PAYMENT_EMAIL: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'{date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\"\n + u\"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '1st day of the month',\n },\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {\n 'recipient': 'Owner, Organizer',\n 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\"\n + u\"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment to prevent loss of functionality.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '15th day of the month',\n },\n EVENT_IMPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been imported',\n 'message': (\n u\"Click on the following link to manage your event\"\n + u\"<br> <a href='{event_url}'>Link</a>\"\n ),\n },\n EVENT_IMPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Import of event failed',\n 'message': (u\"The error was as follows - <br>\" + u\"<pre>{error_text}</pre>\"),\n },\n TEST_MAIL: {\n 'recipient': 'User',\n 'subject': u'Test Mail Subject',\n 'message': (u\"This is a <strong> Test </strong> E-mail.\"),\n },\n}\n", "path": "app/api/helpers/system_mails.py"}]} | 4,026 | 191 |
gh_patches_debug_38939 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3363 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up AWS Batch job definition list
### Problem or idea
The Batch job definition section contains 100+ pages of job definitions (~2500 items). They haven't been cleaned up properly during the deploy process for a while.
### Solution or next step
Clean up the stale items, and make sure the job-deregistering script takes care of old job definitions in the right way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `infrastructure/delete_batch_job_queue.py`
Content:
```
1 import os
2 from time import sleep
3
4 import boto3
5
6 AWS_REGION = os.environ["AWS_REGION"]
7 AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
8
9 batch = boto3.client("batch", region_name=AWS_REGION)
10
11 # First disable each job queue.
12 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
13 try:
14 batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
15 except Exception as e:
16 # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
17 pass
18
19 # Then wait for each one to be disabled so it can be deleted.
20 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
21 while True:
22 job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
23 if "jobQueues" in job_queues:
24 job_queue = job_queues["jobQueues"][0]
25 if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
26 break
27 else:
28 print(f"Unexpected response while describing job queue {batch_queue_name}.")
29 break
30
31 sleep(3)
32
33 batch.delete_job_queue(jobQueue=batch_queue_name)
34
```
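For context (editor's sketch, not the project's chosen fix): the bare `except Exception` in the disable loop above hides every Batch error, not just the expected missing-queue case. One way to narrow it is to catch botocore's `ClientError` and only swallow the "does not exist" situation; the exact message check below is an assumption about the AWS error text.

```python
# Sketch: only swallow the "queue does not exist" error when disabling queues,
# and let anything unexpected propagate.
import os

import boto3
from botocore.exceptions import ClientError

batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])

for batch_queue_name in os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(","):
    try:
        batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
    except ClientError as e:
        if "does not exist" in str(e):
            continue  # Already gone, which is fine: we were deleting it anyway.
        raise
```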
Path: `infrastructure/deregister_batch_job_definitions.py`
Content:
```
1 import os
2
3 import boto3
4
5 AWS_REGION = os.environ["AWS_REGION"]
6
7 batch = boto3.client("batch", region_name=AWS_REGION)
8
9 # TODO: stop repeating this construction everywhere. Just set it once somewhere.
10 JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
11
12 job_definition_files = os.listdir("batch-job-templates")
13
14 job_definition_list = list(
15 {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
16 )
17
18 # Have to go one by one because providing a list of job names doesn't work:
19 # https://github.com/boto/boto3/issues/2908
20 for job_definition in job_definition_list:
21 job_definitions = batch.describe_job_definitions(
22 jobDefinitionName=job_definition, status="ACTIVE"
23 )
24 # There can be multiple revisions per job definition. We want them all gone.
25 for job_definition_revision in job_definitions["jobDefinitions"]:
26 batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
27
```
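A further aside: `describe_job_definitions` returns one page of results per call (up to 100 by default) plus a `nextToken`, so the loop above only ever sees the first page for each job name. A hedged sketch of the paginated cleanup pattern follows; the details are illustrative rather than the project's final script.

```python
# Sketch: follow nextToken so every ACTIVE revision of every job definition is
# found, then deregister each revision.
import os

import boto3

batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
prefix = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"

job_names = {
    prefix + template.upper().split(".")[0]
    for template in os.listdir("batch-job-templates")
}

for job_name in sorted(job_names):
    next_token = ""
    while True:
        kwargs = {"jobDefinitionName": job_name, "status": "ACTIVE", "maxResults": 100}
        if next_token:
            kwargs["nextToken"] = next_token
        response = batch.describe_job_definitions(**kwargs)

        # There can be multiple revisions per job definition; deregister them all.
        for job_definition in response.get("jobDefinitions", []):
            batch.deregister_job_definition(
                jobDefinition=job_definition["jobDefinitionArn"]
            )

        next_token = response.get("nextToken", "")
        if not next_token:
            break
```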
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py
--- a/infrastructure/delete_batch_job_queue.py
+++ b/infrastructure/delete_batch_job_queue.py
@@ -2,19 +2,22 @@
from time import sleep
import boto3
+from botocore.exceptions import ClientError
-AWS_REGION = os.environ["AWS_REGION"]
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
try:
batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
- except Exception as e:
+ except ClientError as e:
# If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
- pass
+ if str(e).endswith(" does not exist."):
+ pass
+ else:
+ raise e
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
diff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py
--- a/infrastructure/deregister_batch_job_definitions.py
+++ b/infrastructure/deregister_batch_job_definitions.py
@@ -2,25 +2,36 @@
import boto3
-AWS_REGION = os.environ["AWS_REGION"]
-
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
-job_definition_files = os.listdir("batch-job-templates")
-
-job_definition_list = list(
- {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
+job_names = (
+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(".")[0]
+ for batch_job_template in os.listdir("batch-job-templates")
)
+nextToken = ""
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
-for job_definition in job_definition_list:
- job_definitions = batch.describe_job_definitions(
- jobDefinitionName=job_definition, status="ACTIVE"
- )
- # There can be multiple revisions per job definition. We want them all gone.
- for job_definition_revision in job_definitions["jobDefinitions"]:
- batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
+for job_name in sorted(job_names):
+ while True:
+ data = {
+ "jobDefinitionName": job_name,
+ "maxResults": 100,
+ "status": "ACTIVE",
+ }
+ if nextToken:
+ data["nextToken"] = nextToken
+
+ response = batch.describe_job_definitions(**data)
+ nextToken = response.get("nextToken", "")
+
+ job_definitions = response.get("jobDefinitions")
+ if not job_definitions:
+ break
+
+ # There can be multiple revisions per job definition. We want them all gone.
+ for job_definition in job_definitions:
+ batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
| {"golden_diff": "diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py\n--- a/infrastructure/delete_batch_job_queue.py\n+++ b/infrastructure/delete_batch_job_queue.py\n@@ -2,19 +2,22 @@\n from time import sleep\n \n import boto3\n+from botocore.exceptions import ClientError\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n AWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n \n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # First disable each job queue.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n- except Exception as e:\n+ except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n- pass\n+ if str(e).endswith(\" does not exist.\"):\n+ pass\n+ else:\n+ raise e\n \n # Then wait for each one to be disabled so it can be deleted.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\ndiff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py\n--- a/infrastructure/deregister_batch_job_definitions.py\n+++ b/infrastructure/deregister_batch_job_definitions.py\n@@ -2,25 +2,36 @@\n \n import boto3\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n-\n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # TODO: stop repeating this construction everywhere. Just set it once somewhere.\n JOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n \n-job_definition_files = os.listdir(\"batch-job-templates\")\n-\n-job_definition_list = list(\n- {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n+job_names = (\n+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n+ for batch_job_template in os.listdir(\"batch-job-templates\")\n )\n+nextToken = \"\"\n \n # Have to go one by one because providing a list of job names doesn't work:\n # https://github.com/boto/boto3/issues/2908\n-for job_definition in job_definition_list:\n- job_definitions = batch.describe_job_definitions(\n- jobDefinitionName=job_definition, status=\"ACTIVE\"\n- )\n- # There can be multiple revisions per job definition. We want them all gone.\n- for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n- batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n+for job_name in sorted(job_names):\n+ while True:\n+ data = {\n+ \"jobDefinitionName\": job_name,\n+ \"maxResults\": 100,\n+ \"status\": \"ACTIVE\",\n+ }\n+ if nextToken:\n+ data[\"nextToken\"] = nextToken\n+\n+ response = batch.describe_job_definitions(**data)\n+ nextToken = response.get(\"nextToken\", \"\")\n+\n+ job_definitions = response.get(\"jobDefinitions\")\n+ if not job_definitions:\n+ break\n+\n+ # There can be multiple revisions per job definition. We want them all gone.\n+ for job_definition in job_definitions:\n+ batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "issue": "Clean up AWS Batch job definition list\n### Problem or idea\r\n\r\nThe Batch job definition section contains 100+ pages of jobs (~2500 items). 
They haven't been cleaned up properly during deploy process for a while.\r\n\r\n\r\n### Solution or next step\r\n\r\nClean up stale items, make sure job deregistering script takes care of old job definitions in a right way.\r\n\n", "before_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except Exception as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n pass\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_definition_files = os.listdir(\"batch-job-templates\")\n\njob_definition_list = list(\n {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n)\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_definition in job_definition_list:\n job_definitions = batch.describe_job_definitions(\n jobDefinitionName=job_definition, status=\"ACTIVE\"\n )\n # There can be multiple revisions per job definition. 
We want them all gone.\n for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}], "after_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n if str(e).endswith(\" does not exist.\"):\n pass\n else:\n raise e\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nbatch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_names = (\n JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n for batch_job_template in os.listdir(\"batch-job-templates\")\n)\nnextToken = \"\"\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_name in sorted(job_names):\n while True:\n data = {\n \"jobDefinitionName\": job_name,\n \"maxResults\": 100,\n \"status\": \"ACTIVE\",\n }\n if nextToken:\n data[\"nextToken\"] = nextToken\n\n response = batch.describe_job_definitions(**data)\n nextToken = response.get(\"nextToken\", \"\")\n\n job_definitions = response.get(\"jobDefinitions\")\n if not job_definitions:\n break\n\n # There can be multiple revisions per job definition. We want them all gone.\n for job_definition in job_definitions:\n batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}]} | 958 | 786 |
gh_patches_debug_10043 | rasdani/github-patches | git_diff | nautobot__nautobot-877 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API
### Proposed Functionality
Before the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.
### Use Cases
As Patti the Platform Admin,
I want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,
So that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc.
One option is to create an "Admin" dropdown in the navigation bar which contains "Users (no change)," "Social Auth (drop 'Python')," and "System" sections. We may need one additional section called "plugins" for when plugins have created entries in Django Admin.
I will know this is done when it is possible to:
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/admin.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.admin import site as admin_site
3 from taggit.models import Tag
4
5
6 # Override default AdminSite attributes so we can avoid creating and
7 # registering our own class
8 admin_site.site_header = "Nautobot Administration"
9 admin_site.site_title = "Nautobot"
10 admin_site.index_template = "admin/nautobot_index.html"
11
12 # Unregister the unused stock Tag model provided by django-taggit
13 admin_site.unregister(Tag)
14
```
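An illustrative aside: the file above already shows the pattern this issue leans on, unregistering third-party models from the shared `admin_site` (the stock taggit `Tag`). A hedged sketch of applying the same pattern to the Python Social Auth models the issue mentions; the defensive `NotRegistered` handling is an assumption, not confirmed Nautobot style.

```python
# Sketch: hide additional third-party models from Django Admin once equivalent
# first-class Nautobot views/APIs exist. Illustrative only.
from django.contrib.admin import site as admin_site
from django.contrib.admin.sites import NotRegistered

from social_django.models import Association, Nonce, UserSocialAuth

for model in (Association, Nonce, UserSocialAuth):
    try:
        admin_site.unregister(model)
    except NotRegistered:
        # Model was never registered (or is already unregistered); nothing to do.
        pass
```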
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py
--- a/nautobot/core/admin.py
+++ b/nautobot/core/admin.py
@@ -1,5 +1,6 @@
from django.conf import settings
from django.contrib.admin import site as admin_site
+from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
@@ -11,3 +12,8 @@
# Unregister the unused stock Tag model provided by django-taggit
admin_site.unregister(Tag)
+
+# Unregister SocialAuth from Django admin menu
+admin_site.unregister(Association)
+admin_site.unregister(Nonce)
+admin_site.unregister(UserSocialAuth)
| {"golden_diff": "diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py\n--- a/nautobot/core/admin.py\n+++ b/nautobot/core/admin.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n from django.contrib.admin import site as admin_site\n+from social_django.models import Association, Nonce, UserSocialAuth\n from taggit.models import Tag\n \n \n@@ -11,3 +12,8 @@\n \n # Unregister the unused stock Tag model provided by django-taggit\n admin_site.unregister(Tag)\n+\n+# Unregister SocialAuth from Django admin menu\n+admin_site.unregister(Association)\n+admin_site.unregister(Nonce)\n+admin_site.unregister(UserSocialAuth)\n", "issue": "Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API\n### Proposed Functionality \r\n\r\nBefore the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.\r\n\r\n### Use Cases\r\n\r\nAs Patti the Platform Admin,\r\nI want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,\r\nSo that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc. \r\n\r\nOne option is to create an \"Admin\" dropdown in the navigation bar which contains \"Users (no change),\" \"Social Auth (drop 'Python'),\" and \"System\" sections. We may need one additional section called \"plugins\" for when plugins have created entries in Django Admin.\r\n\r\nI will know this is done when it is possible to:\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n", "path": "nautobot/core/admin.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom social_django.models import Association, Nonce, UserSocialAuth\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n\n# Unregister SocialAuth from Django admin menu\nadmin_site.unregister(Association)\nadmin_site.unregister(Nonce)\nadmin_site.unregister(UserSocialAuth)\n", "path": "nautobot/core/admin.py"}]} | 680 | 153 |
gh_patches_debug_6459 | rasdani/github-patches | git_diff | holoviz__panel-5490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--autoreload does not work in Panel 1.2.2
Run this code with `panel serve --autoreload tmp.py`, then change `end` to 2; the app does not reload:
``` python
# tmp.py
import panel as pn
pn.widgets.IntSlider(end=1).servable()
```
A git bisect indicates https://github.com/holoviz/panel/pull/5344 is the culprit.
--- END ISSUE ---
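For context on the failure mode (editor's note): in the file below, `count` defaults to None and `_exec_callback` evaluates `self.counter > self.count` inside a bare `except Exception`. In Python 3 an int-vs-None comparison raises TypeError, so the callback body is skipped silently; the one-line guard in the accompanying patch addresses exactly that. A minimal, Panel-independent illustration:

```python
# Comparing an int against None raises TypeError in Python 3, which is what
# happens when `count` is left at its default of None.
counter, count = 1, None

try:
    if counter > count:
        print("stop")
except TypeError as exc:
    print(f"TypeError: {exc}")  # '>' not supported between instances of 'int' and 'NoneType'

# Guarding on None first, as the patch does, avoids the exception entirely.
if count is not None and counter > count:
    print("stop")
```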
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/io/callbacks.py`
Content:
```
1 """
2 Defines callbacks to be executed on a thread or by scheduling it
3 on a running bokeh server.
4 """
5 import asyncio
6 import inspect
7 import logging
8 import time
9
10 from functools import partial
11
12 import param
13
14 from ..util import edit_readonly, function_name
15 from .logging import LOG_PERIODIC_END, LOG_PERIODIC_START
16 from .state import curdoc_locked, state
17
18 log = logging.getLogger('panel.callbacks')
19 _periodic_logger = logging.getLogger(f'{__name__}.PeriodicCallback')
20
21 class PeriodicCallback(param.Parameterized):
22 """
23 Periodic encapsulates a periodic callback which will run both
24 in tornado based notebook environments and on bokeh server. By
25 default the callback will run until the stop method is called,
26 but count and timeout values can be set to limit the number of
27 executions or the maximum length of time for which the callback
28 will run. The callback may also be started and stopped by setting
29 the running parameter to True or False respectively.
30 """
31
32 callback = param.Callable(doc="""
33 The callback to execute periodically.""")
34
35 counter = param.Integer(default=0, doc="""
36 Counts the number of executions.""")
37
38 count = param.Integer(default=None, doc="""
39 Number of times the callback will be executed, by default
40 this is unlimited.""")
41
42 log = param.Boolean(default=True, doc="""
43 Whether the periodic callback should log its actions.""")
44
45 period = param.Integer(default=500, doc="""
46 Period in milliseconds at which the callback is executed.""")
47
48 timeout = param.Integer(default=None, doc="""
49 Timeout in milliseconds from the start time at which the callback
50 expires.""")
51
52 running = param.Boolean(default=False, doc="""
53 Toggles whether the periodic callback is currently running.""")
54
55 def __init__(self, **params):
56 self._background = params.pop('background', False)
57 super().__init__(**params)
58 self._start_time = None
59 self._cb = None
60 self._updating = False
61 self._doc = None
62
63 @param.depends('running', watch=True)
64 def _start(self):
65 if not self.running or self._updating:
66 return
67 self.start()
68
69 @param.depends('running', watch=True)
70 def _stop(self):
71 if self.running or self._updating:
72 return
73 self.stop()
74
75 @param.depends('period', watch=True)
76 def _update_period(self):
77 if self._cb:
78 self.stop()
79 self.start()
80
81 def _exec_callback(self, post=False):
82 from .state import set_curdoc
83 try:
84 with set_curdoc(self._doc):
85 if self.running:
86 self.counter += 1
87 if self.counter > self.count:
88 self.stop()
89 cb = self.callback() if self.running else None
90 except Exception:
91 cb = None
92 if post:
93 self._post_callback()
94 return cb
95
96 def _post_callback(self):
97 cbname = function_name(self.callback)
98 if self._doc and self.log:
99 _periodic_logger.info(
100 LOG_PERIODIC_END, id(self._doc), cbname, self.counter
101 )
102 if not self._background:
103 with edit_readonly(state):
104 state._busy_counter -= 1
105 if self.timeout is not None:
106 dt = (time.time() - self._start_time) * 1000
107 if dt > self.timeout:
108 self.stop()
109 if self.counter == self.count:
110 self.stop()
111
112 async def _periodic_callback(self):
113 if not self._background:
114 with edit_readonly(state):
115 state._busy_counter += 1
116 cbname = function_name(self.callback)
117 if self._doc and self.log:
118 _periodic_logger.info(
119 LOG_PERIODIC_START, id(self._doc), cbname, self.counter
120 )
121 is_async = (
122 inspect.isasyncgenfunction(self.callback) or
123 inspect.iscoroutinefunction(self.callback)
124 )
125 if state._thread_pool and not is_async:
126 future = state._thread_pool.submit(self._exec_callback, True)
127 future.add_done_callback(partial(state._handle_future_exception, doc=self._doc))
128 return
129 try:
130 cb = self._exec_callback()
131 if inspect.isawaitable(cb):
132 await cb
133 except Exception:
134 log.exception('Periodic callback failed.')
135 raise
136 finally:
137 self._post_callback()
138
139 async def _async_repeat(self, func):
140 """
141 Run func every interval seconds.
142
143 If func has not finished before *interval*, will run again
144 immediately when the previous iteration finished.
145 """
146 while True:
147 start = time.monotonic()
148 await func()
149 timeout = (self.period/1000.) - (time.monotonic()-start)
150 if timeout > 0:
151 await asyncio.sleep(timeout)
152
153 def _cleanup(self, session_context):
154 self.stop()
155
156 def start(self):
157 """
158 Starts running the periodic callback.
159 """
160 if self._cb is not None:
161 raise RuntimeError('Periodic callback has already started.')
162 if not self.running:
163 try:
164 self._updating = True
165 self.running = True
166 finally:
167 self._updating = False
168 self._start_time = time.time()
169 if state._is_pyodide:
170 self._cb = asyncio.create_task(
171 self._async_repeat(self._periodic_callback)
172 )
173 elif state.curdoc and state.curdoc.session_context:
174 self._doc = state.curdoc
175 if state._unblocked(state.curdoc):
176 self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)
177 else:
178 self._doc.add_next_tick_callback(self.start)
179 else:
180 from tornado.ioloop import PeriodicCallback
181 self._cb = PeriodicCallback(lambda: asyncio.create_task(self._periodic_callback()), self.period)
182 self._cb.start()
183
184 def stop(self):
185 """
186 Stops running the periodic callback.
187 """
188 if self.running:
189 try:
190 self._updating = True
191 self.running = False
192 finally:
193 self._updating = False
194 with param.discard_events(self):
195 self.counter = 0
196 self._timeout = None
197 if state._is_pyodide:
198 self._cb.cancel()
199 elif self._doc:
200 if self._doc._session_context:
201 self._doc.callbacks.remove_session_callback(self._cb)
202 else:
203 self._doc.callbacks._session_callbacks.remove(self._cb)
204 elif self._cb:
205 self._cb.stop()
206 self._cb = None
207 doc = self._doc or curdoc_locked()
208 if doc:
209 doc.callbacks.session_destroyed_callbacks = {
210 cb for cb in doc.callbacks.session_destroyed_callbacks
211 if cb is not self._cleanup
212 }
213 self._doc = None
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py
--- a/panel/io/callbacks.py
+++ b/panel/io/callbacks.py
@@ -84,7 +84,7 @@
with set_curdoc(self._doc):
if self.running:
self.counter += 1
- if self.counter > self.count:
+ if self.count is not None and self.counter > self.count:
self.stop()
cb = self.callback() if self.running else None
except Exception:
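For context on why this one-line guard is the fix: `count` defaults to `None` (unlimited executions), and in Python 3 the old comparison `self.counter > self.count` raises `TypeError` against `None`; the blanket `except Exception` in `_exec_callback` then swallows the error, so the user callback never fires — which would explain the `--autoreload` symptom in the issue, assuming the autoreload watcher is driven through this periodic callback. A minimal standalone illustration (plain Python, not part of the patch):

```python
count = None   # the default: run until stopped
counter = 1

try:
    if counter > count:          # the old guard
        pass
except TypeError as exc:
    # In Python 3, int and NoneType cannot be ordered, so the callback's
    # blanket `except Exception` silently dropped every execution.
    print(exc)

# The patched guard only compares when a limit was actually set.
if count is not None and counter > count:
    pass
```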
| {"golden_diff": "diff --git a/panel/io/callbacks.py b/panel/io/callbacks.py\n--- a/panel/io/callbacks.py\n+++ b/panel/io/callbacks.py\n@@ -84,7 +84,7 @@\n with set_curdoc(self._doc):\n if self.running:\n self.counter += 1\n- if self.counter > self.count:\n+ if self.count is not None and self.counter > self.count:\n self.stop()\n cb = self.callback() if self.running else None\n except Exception:\n", "issue": "--autoreload does not work in Panel 1.2.2\nRun this code with `panel serve --autoreload tmp.py` and change the end to 2 the \r\n\r\n``` python\r\n# tmp.py\r\nimport panel as pn\r\n\r\npn.widgets.IntSlider(end=1).servable()\r\n```\r\n\r\nA git bisect indicates https://github.com/holoviz/panel/pull/5344 is the culprit. \n", "before_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nimport asyncio\nimport inspect\nimport logging\nimport time\n\nfrom functools import partial\n\nimport param\n\nfrom ..util import edit_readonly, function_name\nfrom .logging import LOG_PERIODIC_END, LOG_PERIODIC_START\nfrom .state import curdoc_locked, state\n\nlog = logging.getLogger('panel.callbacks')\n_periodic_logger = logging.getLogger(f'{__name__}.PeriodicCallback')\n\nclass PeriodicCallback(param.Parameterized):\n \"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run. The callback may also be started and stopped by setting\n the running parameter to True or False respectively.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n counter = param.Integer(default=0, doc=\"\"\"\n Counts the number of executions.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n log = param.Boolean(default=True, doc=\"\"\"\n Whether the periodic callback should log its actions.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in milliseconds from the start time at which the callback\n expires.\"\"\")\n\n running = param.Boolean(default=False, doc=\"\"\"\n Toggles whether the periodic callback is currently running.\"\"\")\n\n def __init__(self, **params):\n self._background = params.pop('background', False)\n super().__init__(**params)\n self._start_time = None\n self._cb = None\n self._updating = False\n self._doc = None\n\n @param.depends('running', watch=True)\n def _start(self):\n if not self.running or self._updating:\n return\n self.start()\n\n @param.depends('running', watch=True)\n def _stop(self):\n if self.running or self._updating:\n return\n self.stop()\n\n @param.depends('period', watch=True)\n def _update_period(self):\n if self._cb:\n self.stop()\n self.start()\n\n def _exec_callback(self, post=False):\n from .state import set_curdoc\n try:\n with set_curdoc(self._doc):\n if self.running:\n self.counter += 1\n if self.counter > self.count:\n self.stop()\n cb = self.callback() if self.running else None\n except Exception:\n cb = None\n if post:\n self._post_callback()\n return cb\n\n def _post_callback(self):\n cbname = function_name(self.callback)\n if self._doc 
and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_END, id(self._doc), cbname, self.counter\n )\n if not self._background:\n with edit_readonly(state):\n state._busy_counter -= 1\n if self.timeout is not None:\n dt = (time.time() - self._start_time) * 1000\n if dt > self.timeout:\n self.stop()\n if self.counter == self.count:\n self.stop()\n\n async def _periodic_callback(self):\n if not self._background:\n with edit_readonly(state):\n state._busy_counter += 1\n cbname = function_name(self.callback)\n if self._doc and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_START, id(self._doc), cbname, self.counter\n )\n is_async = (\n inspect.isasyncgenfunction(self.callback) or\n inspect.iscoroutinefunction(self.callback)\n )\n if state._thread_pool and not is_async:\n future = state._thread_pool.submit(self._exec_callback, True)\n future.add_done_callback(partial(state._handle_future_exception, doc=self._doc))\n return\n try:\n cb = self._exec_callback()\n if inspect.isawaitable(cb):\n await cb\n except Exception:\n log.exception('Periodic callback failed.')\n raise\n finally:\n self._post_callback()\n\n async def _async_repeat(self, func):\n \"\"\"\n Run func every interval seconds.\n\n If func has not finished before *interval*, will run again\n immediately when the previous iteration finished.\n \"\"\"\n while True:\n start = time.monotonic()\n await func()\n timeout = (self.period/1000.) - (time.monotonic()-start)\n if timeout > 0:\n await asyncio.sleep(timeout)\n\n def _cleanup(self, session_context):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts running the periodic callback.\n \"\"\"\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n if not self.running:\n try:\n self._updating = True\n self.running = True\n finally:\n self._updating = False\n self._start_time = time.time()\n if state._is_pyodide:\n self._cb = asyncio.create_task(\n self._async_repeat(self._periodic_callback)\n )\n elif state.curdoc and state.curdoc.session_context:\n self._doc = state.curdoc\n if state._unblocked(state.curdoc):\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n self._doc.add_next_tick_callback(self.start)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(lambda: asyncio.create_task(self._periodic_callback()), self.period)\n self._cb.start()\n\n def stop(self):\n \"\"\"\n Stops running the periodic callback.\n \"\"\"\n if self.running:\n try:\n self._updating = True\n self.running = False\n finally:\n self._updating = False\n with param.discard_events(self):\n self.counter = 0\n self._timeout = None\n if state._is_pyodide:\n self._cb.cancel()\n elif self._doc:\n if self._doc._session_context:\n self._doc.callbacks.remove_session_callback(self._cb)\n else:\n self._doc.callbacks._session_callbacks.remove(self._cb)\n elif self._cb:\n self._cb.stop()\n self._cb = None\n doc = self._doc or curdoc_locked()\n if doc:\n doc.callbacks.session_destroyed_callbacks = {\n cb for cb in doc.callbacks.session_destroyed_callbacks\n if cb is not self._cleanup\n }\n self._doc = None\n", "path": "panel/io/callbacks.py"}], "after_files": [{"content": "\"\"\"\nDefines callbacks to be executed on a thread or by scheduling it\non a running bokeh server.\n\"\"\"\nimport asyncio\nimport inspect\nimport logging\nimport time\n\nfrom functools import partial\n\nimport param\n\nfrom ..util import edit_readonly, function_name\nfrom .logging import LOG_PERIODIC_END, LOG_PERIODIC_START\nfrom .state import 
curdoc_locked, state\n\nlog = logging.getLogger('panel.callbacks')\n_periodic_logger = logging.getLogger(f'{__name__}.PeriodicCallback')\n\nclass PeriodicCallback(param.Parameterized):\n \"\"\"\n Periodic encapsulates a periodic callback which will run both\n in tornado based notebook environments and on bokeh server. By\n default the callback will run until the stop method is called,\n but count and timeout values can be set to limit the number of\n executions or the maximum length of time for which the callback\n will run. The callback may also be started and stopped by setting\n the running parameter to True or False respectively.\n \"\"\"\n\n callback = param.Callable(doc=\"\"\"\n The callback to execute periodically.\"\"\")\n\n counter = param.Integer(default=0, doc=\"\"\"\n Counts the number of executions.\"\"\")\n\n count = param.Integer(default=None, doc=\"\"\"\n Number of times the callback will be executed, by default\n this is unlimited.\"\"\")\n\n log = param.Boolean(default=True, doc=\"\"\"\n Whether the periodic callback should log its actions.\"\"\")\n\n period = param.Integer(default=500, doc=\"\"\"\n Period in milliseconds at which the callback is executed.\"\"\")\n\n timeout = param.Integer(default=None, doc=\"\"\"\n Timeout in milliseconds from the start time at which the callback\n expires.\"\"\")\n\n running = param.Boolean(default=False, doc=\"\"\"\n Toggles whether the periodic callback is currently running.\"\"\")\n\n def __init__(self, **params):\n self._background = params.pop('background', False)\n super().__init__(**params)\n self._start_time = None\n self._cb = None\n self._updating = False\n self._doc = None\n\n @param.depends('running', watch=True)\n def _start(self):\n if not self.running or self._updating:\n return\n self.start()\n\n @param.depends('running', watch=True)\n def _stop(self):\n if self.running or self._updating:\n return\n self.stop()\n\n @param.depends('period', watch=True)\n def _update_period(self):\n if self._cb:\n self.stop()\n self.start()\n\n def _exec_callback(self, post=False):\n from .state import set_curdoc\n try:\n with set_curdoc(self._doc):\n if self.running:\n self.counter += 1\n if self.count is not None and self.counter > self.count:\n self.stop()\n cb = self.callback() if self.running else None\n except Exception:\n cb = None\n if post:\n self._post_callback()\n return cb\n\n def _post_callback(self):\n cbname = function_name(self.callback)\n if self._doc and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_END, id(self._doc), cbname, self.counter\n )\n if not self._background:\n with edit_readonly(state):\n state._busy_counter -= 1\n if self.timeout is not None:\n dt = (time.time() - self._start_time) * 1000\n if dt > self.timeout:\n self.stop()\n if self.counter == self.count:\n self.stop()\n\n async def _periodic_callback(self):\n if not self._background:\n with edit_readonly(state):\n state._busy_counter += 1\n cbname = function_name(self.callback)\n if self._doc and self.log:\n _periodic_logger.info(\n LOG_PERIODIC_START, id(self._doc), cbname, self.counter\n )\n is_async = (\n inspect.isasyncgenfunction(self.callback) or\n inspect.iscoroutinefunction(self.callback)\n )\n if state._thread_pool and not is_async:\n future = state._thread_pool.submit(self._exec_callback, True)\n future.add_done_callback(partial(state._handle_future_exception, doc=self._doc))\n return\n try:\n cb = self._exec_callback()\n if inspect.isawaitable(cb):\n await cb\n except Exception:\n log.exception('Periodic callback failed.')\n raise\n 
finally:\n self._post_callback()\n\n async def _async_repeat(self, func):\n \"\"\"\n Run func every interval seconds.\n\n If func has not finished before *interval*, will run again\n immediately when the previous iteration finished.\n \"\"\"\n while True:\n start = time.monotonic()\n await func()\n timeout = (self.period/1000.) - (time.monotonic()-start)\n if timeout > 0:\n await asyncio.sleep(timeout)\n\n def _cleanup(self, session_context):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts running the periodic callback.\n \"\"\"\n if self._cb is not None:\n raise RuntimeError('Periodic callback has already started.')\n if not self.running:\n try:\n self._updating = True\n self.running = True\n finally:\n self._updating = False\n self._start_time = time.time()\n if state._is_pyodide:\n self._cb = asyncio.create_task(\n self._async_repeat(self._periodic_callback)\n )\n elif state.curdoc and state.curdoc.session_context:\n self._doc = state.curdoc\n if state._unblocked(state.curdoc):\n self._cb = self._doc.add_periodic_callback(self._periodic_callback, self.period)\n else:\n self._doc.add_next_tick_callback(self.start)\n else:\n from tornado.ioloop import PeriodicCallback\n self._cb = PeriodicCallback(lambda: asyncio.create_task(self._periodic_callback()), self.period)\n self._cb.start()\n\n def stop(self):\n \"\"\"\n Stops running the periodic callback.\n \"\"\"\n if self.running:\n try:\n self._updating = True\n self.running = False\n finally:\n self._updating = False\n with param.discard_events(self):\n self.counter = 0\n self._timeout = None\n if state._is_pyodide:\n self._cb.cancel()\n elif self._doc:\n if self._doc._session_context:\n self._doc.callbacks.remove_session_callback(self._cb)\n else:\n self._doc.callbacks._session_callbacks.remove(self._cb)\n elif self._cb:\n self._cb.stop()\n self._cb = None\n doc = self._doc or curdoc_locked()\n if doc:\n doc.callbacks.session_destroyed_callbacks = {\n cb for cb in doc.callbacks.session_destroyed_callbacks\n if cb is not self._cleanup\n }\n self._doc = None\n", "path": "panel/io/callbacks.py"}]} | 2,374 | 113 |
gh_patches_debug_4919 | rasdani/github-patches | git_diff | bokeh__bokeh-1361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot style minor ticks
Axis objects do not have minor tick properties.
--- END ISSUE ---
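For orientation, a sketch of the styling the issue is asking for, assuming the new properties mirror the `major_tick_*` names that `Include(LineProps)` already generates (the `minor_tick_*` names below are hypothetical until the accompanying patch adds them):

```python
from bokeh.models.axes import LinearAxis

axis = LinearAxis()

# Already possible through the existing major_tick_* properties:
axis.major_tick_line_color = "navy"
axis.major_tick_in = 2

# What the issue requests -- only valid once minor tick properties exist:
axis.minor_tick_line_color = "firebrick"
axis.minor_tick_in = 3
axis.minor_tick_out = 6
```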
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/axes.py`
Content:
```
1 from __future__ import absolute_import
2
3 from ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include
4 from ..mixins import LineProps, TextProps
5 from ..enums import Location
6
7 from .renderers import GuideRenderer
8 from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker
9 from .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter
10
11 class Axis(GuideRenderer):
12 location = Either(Enum('auto'), Enum(Location))
13 bounds = Either(Enum('auto'), Tuple(Float, Float))
14
15 x_range_name = String('default')
16 y_range_name = String('default')
17
18 ticker = Instance(Ticker)
19 formatter = Instance(TickFormatter)
20
21 axis_label = String
22 axis_label_standoff = Int
23 axis_label_props = Include(TextProps)
24
25 major_label_standoff = Int
26 major_label_orientation = Either(Enum("horizontal", "vertical"), Float)
27 major_label_props = Include(TextProps)
28
29 axis_props = Include(LineProps)
30 major_tick_props = Include(LineProps)
31
32 major_tick_in = Int
33 major_tick_out = Int
34
35 class ContinuousAxis(Axis):
36 pass
37
38 class LinearAxis(ContinuousAxis):
39 def __init__(self, ticker=None, formatter=None, **kwargs):
40 if ticker is None:
41 ticker = BasicTicker()
42 if formatter is None:
43 formatter = BasicTickFormatter()
44 super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
45
46 class LogAxis(ContinuousAxis):
47 def __init__(self, ticker=None, formatter=None, **kwargs):
48 if ticker is None:
49 ticker = LogTicker(num_minor_ticks=10)
50 if formatter is None:
51 formatter = LogTickFormatter()
52 super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
53
54 class CategoricalAxis(Axis):
55 def __init__(self, ticker=None, formatter=None, **kwargs):
56 if ticker is None:
57 ticker = CategoricalTicker()
58 if formatter is None:
59 formatter = CategoricalTickFormatter()
60 super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
61
62 class DatetimeAxis(LinearAxis):
63 axis_label = String("date")
64 scale = String("time")
65 num_labels = Int(8)
66 char_width = Int(10)
67 fill_ratio = Float(0.3)
68
69 def __init__(self, ticker=None, formatter=None, **kwargs):
70 if ticker is None:
71 ticker = DatetimeTicker()
72 if formatter is None:
73 formatter = DatetimeTickFormatter()
74 super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py
--- a/bokeh/models/axes.py
+++ b/bokeh/models/axes.py
@@ -27,11 +27,15 @@
major_label_props = Include(TextProps)
axis_props = Include(LineProps)
- major_tick_props = Include(LineProps)
+ major_tick_props = Include(LineProps)
major_tick_in = Int
major_tick_out = Int
+ minor_tick_props = Include(LineProps)
+ minor_tick_in = Int
+ minor_tick_out = Int
+
class ContinuousAxis(Axis):
pass
| {"golden_diff": "diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py\n--- a/bokeh/models/axes.py\n+++ b/bokeh/models/axes.py\n@@ -27,11 +27,15 @@\n major_label_props = Include(TextProps)\n \n axis_props = Include(LineProps)\n- major_tick_props = Include(LineProps)\n \n+ major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n \n+ minor_tick_props = Include(LineProps)\n+ minor_tick_in = Int\n+ minor_tick_out = Int\n+\n class ContinuousAxis(Axis):\n pass\n", "issue": "Cannot style minor ticks\nAxis objects do not have minor tick properties.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n major_tick_props = Include(LineProps)\n\n major_tick_in = Int\n major_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, 
DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n\n major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n\n minor_tick_props = Include(LineProps)\n minor_tick_in = Int\n minor_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}]} | 1,017 | 143 |
gh_patches_debug_14499 | rasdani/github-patches | git_diff | optuna__optuna-3545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show a warning message from `GridSampler` when a `CategoricalDistribution` contains values of unsupported types, instead of raising `ValueError`
### Motivation
The grid search sampler has a strict type rule for its search space for `CategoricalDistribution`, which is not consistent with the other samplers as reported in #3534.
### Description
`CategoricalDistribution` shows a warning message when the `choices` argument contains a value of an unsupported type.
When we run the following code
```python
import optuna
optuna.distributions.CategoricalDistribution([[1], [2]])
```
then we see the following warning messages:
```bash
/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [1] which is of type list.
warnings.warn(message)
/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [2] which is of type list.
warnings.warn(message)
```
On the other hand, the grid search sampler raises `ValueError` when a value of such an unsupported type is used as an element of `choices` of the distribution.
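A minimal reproduction of the `GridSampler` side of this inconsistency (a sketch; the `ValueError` is raised by the `_check_value` helper in the file shown below):

```python
import optuna

# A list is not one of str/int/float/bool/None, so the sampler rejects it.
search_space = {"x": [[1], [2]]}

try:
    optuna.samplers.GridSampler(search_space)
except ValueError as exc:
    print(exc)
    # -> "x contains a value with the type of <class 'list'>, which is not
    #     supported by `GridSampler`. ..."
```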
### Alternatives (optional)
_No response_
### Additional context (optional)
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/samplers/_grid.py`
Content:
```
1 import collections
2 import itertools
3 import random
4 from typing import Any
5 from typing import cast
6 from typing import Dict
7 from typing import List
8 from typing import Mapping
9 from typing import Optional
10 from typing import Sequence
11 from typing import Union
12 import warnings
13
14 from optuna.distributions import BaseDistribution
15 from optuna.logging import get_logger
16 from optuna.samplers import BaseSampler
17 from optuna.study import Study
18 from optuna.trial import FrozenTrial
19 from optuna.trial import TrialState
20
21
22 GridValueType = Union[str, float, int, bool, None]
23 SortableParamValueSequenceType = Union[List[str], List[float], List[int], List[bool]]
24
25
26 _logger = get_logger(__name__)
27
28
29 class GridSampler(BaseSampler):
30 """Sampler using grid search.
31
32 With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters
33 in the given search space during the study.
34
35 Example:
36
37 .. testcode::
38
39 import optuna
40
41
42 def objective(trial):
43 x = trial.suggest_float("x", -100, 100)
44 y = trial.suggest_int("y", -100, 100)
45 return x**2 + y**2
46
47
48 search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
49 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
50 study.optimize(objective)
51
52 Note:
53
54 :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all
55 combinations in the passed ``search_space`` have already been evaluated, internally
56 invoking the :func:`~optuna.study.Study.stop` method.
57
58 Note:
59
60 :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization
61 specified by discrete suggest methods but just samples one of values specified in the
62 search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is
63 sampled as ``x`` instead of an integer point.
64
65 .. testcode::
66
67 import optuna
68
69
70 def objective(trial):
71 # The following suggest method specifies integer points between -5 and 5.
72 x = trial.suggest_float("x", -5, 5, step=1)
73 return x**2
74
75
76 # Non-int points are specified in the grid.
77 search_space = {"x": [-0.5, 0.5]}
78 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
79 study.optimize(objective, n_trials=2)
80
81 Note:
82 A parameter configuration in the grid is not considered finished until its trial is
83 finished. Therefore, during distributed optimization where trials run concurrently,
84 different workers will occasionally suggest the same parameter configuration.
85 The total number of actual trials may therefore exceed the size of the grid.
86
87 Note:
88 The grid is randomly shuffled and the order in which parameter configurations are
89 suggested may vary. This is to reduce duplicate suggestions during distributed
90 optimization.
91
92 Note:
93 All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with
94 :meth:`~optuna.study.Study.enqueue_trial`.
95
96 Args:
97 search_space:
98 A dictionary whose key and value are a parameter name and the corresponding candidates
99 of values, respectively.
100 """
101
102 def __init__(self, search_space: Mapping[str, Sequence[GridValueType]]) -> None:
103
104 for param_name, param_values in search_space.items():
105 for value in param_values:
106 self._check_value(param_name, value)
107
108 self._search_space = collections.OrderedDict()
109 for param_name, param_values in sorted(search_space.items(), key=lambda x: x[0]):
110 param_values = cast(SortableParamValueSequenceType, param_values)
111
112 self._search_space[param_name] = sorted(param_values)
113
114 self._all_grids = list(itertools.product(*self._search_space.values()))
115 self._param_names = sorted(search_space.keys())
116 self._n_min_trials = len(self._all_grids)
117
118 def infer_relative_search_space(
119 self, study: Study, trial: FrozenTrial
120 ) -> Dict[str, BaseDistribution]:
121
122 return {}
123
124 def sample_relative(
125 self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
126 ) -> Dict[str, Any]:
127 # Instead of returning param values, GridSampler puts the target grid id as a system attr,
128 # and the values are returned from `sample_independent`. This is because the distribution
129 # object is hard to get at the beginning of trial, while we need the access to the object
130 # to validate the sampled value.
131
132 # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not
133 # assign a new grid_id.
134 if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs:
135 return {}
136
137 target_grids = self._get_unvisited_grid_ids(study)
138
139 if len(target_grids) == 0:
140 # This case may occur with distributed optimization or trial queue. If there is no
141 # target grid, `GridSampler` evaluates a visited, duplicated point with the current
142 # trial. After that, the optimization stops.
143
144 _logger.warning(
145 "`GridSampler` is re-evaluating a configuration because the grid has been "
146 "exhausted. This may happen due to a timing issue during distributed optimization "
147 "or when re-running optimizations on already finished studies."
148 )
149
150 # One of all grids is randomly picked up in this case.
151 target_grids = list(range(len(self._all_grids)))
152
153 # In distributed optimization, multiple workers may simultaneously pick up the same grid.
154 # To make the conflict less frequent, the grid is chosen randomly.
155 grid_id = random.choice(target_grids)
156
157 study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space)
158 study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id)
159
160 return {}
161
162 def sample_independent(
163 self,
164 study: Study,
165 trial: FrozenTrial,
166 param_name: str,
167 param_distribution: BaseDistribution,
168 ) -> Any:
169
170 if "grid_id" not in trial.system_attrs:
171 message = "All parameters must be specified when using GridSampler with enqueue_trial."
172 raise ValueError(message)
173
174 if param_name not in self._search_space:
175 message = "The parameter name, {}, is not found in the given grid.".format(param_name)
176 raise ValueError(message)
177
178 # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.
179 # Current selection logic may evaluate the same parameters multiple times.
180 # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.
181 grid_id = trial.system_attrs["grid_id"]
182 param_value = self._all_grids[grid_id][self._param_names.index(param_name)]
183 contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))
184 if not contains:
185 warnings.warn(
186 f"The value `{param_value}` is out of range of the parameter `{param_name}`. "
187 f"The value will be used but the actual distribution is: `{param_distribution}`."
188 )
189
190 return param_value
191
192 def after_trial(
193 self,
194 study: Study,
195 trial: FrozenTrial,
196 state: TrialState,
197 values: Optional[Sequence[float]],
198 ) -> None:
199 target_grids = self._get_unvisited_grid_ids(study)
200
201 if len(target_grids) == 0:
202 study.stop()
203 elif len(target_grids) == 1:
204 grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
205 if grid_id == target_grids[0]:
206 study.stop()
207
208 @staticmethod
209 def _check_value(param_name: str, param_value: Any) -> None:
210
211 if param_value is None or isinstance(param_value, (str, int, float, bool)):
212 return
213
214 raise ValueError(
215 "{} contains a value with the type of {}, which is not supported by "
216 "`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
217 " or `None`.".format(param_name, type(param_value))
218 )
219
220 def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
221
222 # List up unvisited grids based on already finished ones.
223 visited_grids = []
224 running_grids = []
225
226 # We directly query the storage to get trials here instead of `study.get_trials`,
227 # since some pruners such as `HyperbandPruner` use the study transformed
228 # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
229 trials = study._storage.get_all_trials(study._study_id, deepcopy=False)
230
231 for t in trials:
232 if "grid_id" in t.system_attrs and self._same_search_space(
233 t.system_attrs["search_space"]
234 ):
235 if t.state.is_finished():
236 visited_grids.append(t.system_attrs["grid_id"])
237 elif t.state == TrialState.RUNNING:
238 running_grids.append(t.system_attrs["grid_id"])
239
240 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)
241
242 # If evaluations for all grids have been started, return grids that have not yet finished
243 # because all grids should be evaluated before stopping the optimization.
244 if len(unvisited_grids) == 0:
245 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)
246
247 return list(unvisited_grids)
248
249 def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
250
251 if set(search_space.keys()) != set(self._search_space.keys()):
252 return False
253
254 for param_name in search_space.keys():
255 if len(search_space[param_name]) != len(self._search_space[param_name]):
256 return False
257
258 param_values = cast(SortableParamValueSequenceType, search_space[param_name])
259 for i, param_value in enumerate(sorted(param_values)):
260 if param_value != self._search_space[param_name][i]:
261 return False
262
263 return True
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py
--- a/optuna/samplers/_grid.py
+++ b/optuna/samplers/_grid.py
@@ -211,11 +211,12 @@
if param_value is None or isinstance(param_value, (str, int, float, bool)):
return
- raise ValueError(
+ message = (
"{} contains a value with the type of {}, which is not supported by "
"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
- " or `None`.".format(param_name, type(param_value))
+ " or `None` for persistent storage.".format(param_name, type(param_value))
)
+ warnings.warn(message)
def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
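With the patch applied, the same construction only emits a `UserWarning`, matching `CategoricalDistribution`; a quick sanity check of the new behaviour (a sketch, not part of the patch):

```python
import warnings

import optuna

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    optuna.samplers.GridSampler({"x": [[1], [2]]})   # only warns after the patch

# One warning per offending value; the text ends with
# "... or `None` for persistent storage."
print(caught[0].message)
```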
| {"golden_diff": "diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py\n--- a/optuna/samplers/_grid.py\n+++ b/optuna/samplers/_grid.py\n@@ -211,11 +211,12 @@\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n \n- raise ValueError(\n+ message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n- \" or `None`.\".format(param_name, type(param_value))\n+ \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n+ warnings.warn(message)\n \n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n", "issue": "Show warning message by `GridSearchSampler` whose `CategoricalDistribution` contains unsupported typed values, not raising `ValueError`\n### Motivation\n\nThe grid search sampler has a strict type rule for its search space for `CategoricalDistribution`, which is not consistent with the other samplers as reported in #3534.\r\n\n\n### Description\n\n`CategoriaclDistribution` shows a warning message when the `choices` argument contains an unsupported type.\r\n\r\nWhen we run the following code\r\n```python\r\nimport optuna\r\n\r\noptuna.distributions.CategoricalDistribution([[1], [2]])\r\n```\r\n\r\nthen we see the following warning message:\r\n\r\n```bash\r\n/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [1] which is of type list.\r\n warnings.warn(message)\r\n/Users/nzw/Documents/optuna/optuna/distributions.py:501: UserWarning: Choices for a categorical distribution should be a tuple of None, bool, int, float and str for persistent storage but contains [2] which is of type list.\r\n warnings.warn(message)\r\n```\r\n\r\n\r\nOn the other hand, the grid search sampler raises `ValueError` when we use such unsupported typed value as an element of `choices` of the distribution.\r\n\n\n### Alternatives (optional)\n\n_No response_\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import collections\nimport itertools\nimport random\nfrom typing import Any\nfrom typing import cast\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\nSortableParamValueSequenceType = Union[List[str], List[float], List[int], List[bool]]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n The grid is randomly shuffled and the order in which parameter configurations are\n suggested may vary. This is to reduce duplicate suggestions during distributed\n optimization.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n \"\"\"\n\n def __init__(self, search_space: Mapping[str, Sequence[GridValueType]]) -> None:\n\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = collections.OrderedDict()\n for param_name, param_values in sorted(search_space.items(), key=lambda x: x[0]):\n param_values = cast(SortableParamValueSequenceType, param_values)\n\n self._search_space[param_name] = sorted(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. 
This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return {}\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = random.choice(target_grids)\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n raise ValueError(\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. 
Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None`.\".format(param_name, type(param_value))\n )\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n param_values = cast(SortableParamValueSequenceType, search_space[param_name])\n for i, param_value in enumerate(sorted(param_values)):\n if param_value != self._search_space[param_name][i]:\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}], "after_files": [{"content": "import collections\nimport itertools\nimport random\nfrom typing import Any\nfrom typing import cast\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\nSortableParamValueSequenceType = Union[List[str], List[float], List[int], List[bool]]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n The grid is randomly shuffled and the order in which parameter configurations are\n suggested may vary. This is to reduce duplicate suggestions during distributed\n optimization.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n \"\"\"\n\n def __init__(self, search_space: Mapping[str, Sequence[GridValueType]]) -> None:\n\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = collections.OrderedDict()\n for param_name, param_values in sorted(search_space.items(), key=lambda x: x[0]):\n param_values = cast(SortableParamValueSequenceType, param_values)\n\n self._search_space[param_name] = sorted(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. 
This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return {}\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = random.choice(target_grids)\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. 
Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n param_values = cast(SortableParamValueSequenceType, search_space[param_name])\n for i, param_value in enumerate(sorted(param_values)):\n if param_value != self._search_space[param_name][i]:\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}]} | 3,545 | 195 |
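The record above covers Optuna's `GridSampler`; its docstring notes that optimization stops automatically once every combination in the search space has been evaluated. A minimal usage sketch, following the conventions of that docstring (the parameter names and grid values here are illustrative, not prescribed):

```python
import optuna


def objective(trial):
    x = trial.suggest_float("x", -100, 100)
    y = trial.suggest_int("y", -100, 100)
    return x**2 + y**2


search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
# No n_trials argument is needed: GridSampler internally calls study.stop()
# once all 3 x 3 = 9 grid combinations have been evaluated.
study.optimize(objective)
print(len(study.trials))  # expected: 9 in a single-process run
```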
gh_patches_debug_14570 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
localization: various problems
# Bug
## Description
This issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**.
They should be fixed in a pull request right after the 0.4.4 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/forms.py`
Content:
```
1 from flask_babel import gettext
2 from flask_wtf import FlaskForm
3 from wtforms import PasswordField
4 from wtforms.validators import InputRequired, Regexp, Length
5
6 from db import Source
7
8
9 class LoginForm(FlaskForm):
10 codename = PasswordField('codename', validators=[
11 InputRequired(message=gettext('This field is required.')),
12 Length(1, Source.MAX_CODENAME_LEN,
13 message=gettext('Field must be between 1 and '
14 '{max_codename_len} characters long. '.format(
15 max_codename_len=Source.MAX_CODENAME_LEN))),
16 # Make sure to allow dashes since some words in the wordlist have them
17 Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
18 ])
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py
--- a/securedrop/source_app/forms.py
+++ b/securedrop/source_app/forms.py
@@ -11,7 +11,7 @@
InputRequired(message=gettext('This field is required.')),
Length(1, Source.MAX_CODENAME_LEN,
message=gettext('Field must be between 1 and '
- '{max_codename_len} characters long. '.format(
+ '{max_codename_len} characters long.'.format(
max_codename_len=Source.MAX_CODENAME_LEN))),
# Make sure to allow dashes since some words in the wordlist have them
Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
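The golden diff above only strips a trailing space from the translatable string, which is easy to misread as a no-op. A minimal, self-contained sketch (hypothetical catalog entries, not SecureDrop code) of why that space matters: gettext-style lookups match the msgid exactly, so the variant with the stray space never finds its translation and falls back to English.

```python
# Hypothetical message catalog; real catalogs are compiled .mo files.
catalog = {
    "Field must be between 1 and 7 characters long.": "Le champ doit contenir entre 1 et 7 caractères.",
}


def translate(msgid: str) -> str:
    # Like gettext, return the untranslated msgid when no entry matches.
    return catalog.get(msgid, msgid)


print(translate("Field must be between 1 and 7 characters long."))   # translated
print(translate("Field must be between 1 and 7 characters long. "))  # trailing space -> English fallback
```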
| {"golden_diff": "diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py\n--- a/securedrop/source_app/forms.py\n+++ b/securedrop/source_app/forms.py\n@@ -11,7 +11,7 @@\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n- '{max_codename_len} characters long. '.format(\n+ '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n", "issue": "localization: various problems\n# Bug\r\n\r\n## Description\r\n\r\nThis issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**\r\n\r\nThey should be fixed in a pull request right after the 0.4.4 release.\n", "before_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long. '.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}], "after_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}]} | 513 | 169 |
gh_patches_debug_64451 | rasdani/github-patches | git_diff | bokeh__bokeh-9477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
bokeh HEAD e605297
gunicorn (version 20.0.4)
Python 3.7.4
macOS 10.14.6
#### Description of expected behavior and the observed behavior
I am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.
Marking the bk_worker thread as a daemon before starting it resolves the hang.
#### Stack traceback and/or browser JavaScript console output
greent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app
[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4
[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)
[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync
[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815
[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816
[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817
[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818
^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int
[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)
[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)
[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)
[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)
If I hit Ctrl-C again, it continues and exits noisily:
^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py", line 1308, in _shutdown
lock.acquire()
File "/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py", line 196, in handle_quit
sys.exit(0)
SystemExit: 0
[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/howto/server_embed/flask_gunicorn_embed.py`
Content:
```
1 try:
2 import asyncio
3 except ImportError:
4 raise RuntimeError("This example requries Python3 / asyncio")
5
6 from threading import Thread
7
8 from flask import Flask, render_template
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11
12 from bokeh.application import Application
13 from bokeh.application.handlers import FunctionHandler
14 from bokeh.embed import server_document
15 from bokeh.layouts import column
16 from bokeh.models import ColumnDataSource, Slider
17 from bokeh.plotting import figure
18 from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
19 from bokeh.server.server import BaseServer
20 from bokeh.server.tornado import BokehTornado
21 from bokeh.server.util import bind_sockets
22 from bokeh.themes import Theme
23
24 if __name__ == '__main__':
25 print('This script is intended to be run with gunicorn. e.g.')
26 print()
27 print(' gunicorn -w 4 flask_gunicorn_embed:app')
28 print()
29 print('will start the app on four processes')
30 import sys
31 sys.exit()
32
33
34 app = Flask(__name__)
35
36 def bkapp(doc):
37 df = sea_surface_temperature.copy()
38 source = ColumnDataSource(data=df)
39
40 plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
41 title="Sea Surface Temperature at 43.18, -70.43")
42 plot.line('time', 'temperature', source=source)
43
44 def callback(attr, old, new):
45 if new == 0:
46 data = df
47 else:
48 data = df.rolling('{0}D'.format(new)).mean()
49 source.data = ColumnDataSource.from_df(data)
50
51 slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
52 slider.on_change('value', callback)
53
54 doc.add_root(column(slider, plot))
55
56 doc.theme = Theme(filename="theme.yaml")
57
58 # can't use shortcuts here, since we are passing to low level BokehTornado
59 bkapp = Application(FunctionHandler(bkapp))
60
61 # This is so that if this app is run using something like "gunicorn -w 4" then
62 # each process will listen on its own port
63 sockets, port = bind_sockets("localhost", 0)
64
65 @app.route('/', methods=['GET'])
66 def bkapp_page():
67 script = server_document('http://localhost:%d/bkapp' % port)
68 return render_template("embed.html", script=script, template="Flask")
69
70 def bk_worker():
71 asyncio.set_event_loop(asyncio.new_event_loop())
72
73 bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"])
74 bokeh_http = HTTPServer(bokeh_tornado)
75 bokeh_http.add_sockets(sockets)
76
77 server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
78 server.start()
79 server.io_loop.start()
80
81 Thread(target=bk_worker).start()
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py
--- a/examples/howto/server_embed/flask_gunicorn_embed.py
+++ b/examples/howto/server_embed/flask_gunicorn_embed.py
@@ -78,4 +78,6 @@
server.start()
server.io_loop.start()
-Thread(target=bk_worker).start()
+t = Thread(target=bk_worker)
+t.daemon = True
+t.start()
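The golden diff above is the standard daemon-thread pattern. A standalone sketch (not the full Flask/Bokeh example from this record) of the behavioural difference it fixes:

```python
import threading
import time


def bk_worker() -> None:
    # Stand-in for the Tornado IO loop in the example, which never returns.
    while True:
        time.sleep(1)


# Buggy form: Python waits for non-daemon threads before the interpreter can
# exit, which is why the gunicorn worker processes hang on Ctrl-C.
# threading.Thread(target=bk_worker).start()

# Fixed form: a daemon thread is terminated automatically when the main
# thread exits, so the process shuts down cleanly.
t = threading.Thread(target=bk_worker, daemon=True)
t.start()
```

The patch sets `t.daemon = True` as an attribute rather than using the `daemon=True` keyword, but the effect is the same.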
| {"golden_diff": "diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py\n--- a/examples/howto/server_embed/flask_gunicorn_embed.py\n+++ b/examples/howto/server_embed/flask_gunicorn_embed.py\n@@ -78,4 +78,6 @@\n server.start()\n server.io_loop.start()\n \n-Thread(target=bk_worker).start()\n+t = Thread(target=bk_worker)\n+t.daemon = True\n+t.start()\n", "issue": "[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nbokeh HEAD e605297\r\ngunicorn (version 20.0.4)\r\nPython 3.7.4\r\nmacOS 10.14.6\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.\r\n\r\nMarking the bk_worker thread as a daemon before starting it resolves the hang.\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\ngreent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync\r\n[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815\r\n[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816\r\n[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817\r\n[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818\r\n^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int\r\n[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)\r\n[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)\r\n[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)\r\n[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)\r\n\r\nIf I hit Ctrl-C again, it continues and exits noisily:\r\n\r\n^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1308, in _shutdown\r\n lock.acquire()\r\n File \"/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 196, in handle_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master\r\n\n[BUG] Non-daemon worker thread prevents gunicorn from shutting down cleanly.\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nbokeh HEAD e605297\r\ngunicorn (version 20.0.4)\r\nPython 3.7.4\r\nmacOS 10.14.6\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI am learning about embedding Bokeh in a Flask project and tried the sample script flask_gunicorn_embed.py from the macOS terminal. After viewing the working web page in Safari, I then pressed Ctrl-C in the terminal to stop the gunicorn server. 
The expected behaviour was a clean shutdown of gunicorn, but instead it hangs.\r\n\r\nMarking the bk_worker thread as a daemon before starting it resolves the hang.\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\ngreent7@avocado:~/development/bokeh/examples/howto/server_embed$ BOKEH_ALLOW_WS_ORIGIN=127.0.0.1:8000 gunicorn -w 4 flask_gunicorn_embed:app\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Starting gunicorn 20.0.4\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Listening at: http://127.0.0.1:8000 (53812)\r\n[2019-11-29 01:06:31 -0700] [53812] [INFO] Using worker: sync\r\n[2019-11-29 01:06:31 -0700] [53815] [INFO] Booting worker with pid: 53815\r\n[2019-11-29 01:06:32 -0700] [53816] [INFO] Booting worker with pid: 53816\r\n[2019-11-29 01:06:32 -0700] [53817] [INFO] Booting worker with pid: 53817\r\n[2019-11-29 01:06:32 -0700] [53818] [INFO] Booting worker with pid: 53818\r\n^C[2019-11-29 01:06:33 -0700] [53812] [INFO] Handling signal: int\r\n[2019-11-29 01:06:33 -0700] [53818] [INFO] Worker exiting (pid: 53818)\r\n[2019-11-29 01:06:33 -0700] [53815] [INFO] Worker exiting (pid: 53815)\r\n[2019-11-29 01:06:33 -0700] [53817] [INFO] Worker exiting (pid: 53817)\r\n[2019-11-29 01:06:33 -0700] [53816] [INFO] Worker exiting (pid: 53816)\r\n\r\nIf I hit Ctrl-C again, it continues and exits noisily:\r\n\r\n^CException ignored in: <module 'threading' from '/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py'>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/lib/python3.7/threading.py\", line 1308, in _shutdown\r\n lock.acquire()\r\n File \"/usr/local/lib/python3.7/site-packages/gunicorn/workers/base.py\", line 196, in handle_quit\r\n sys.exit(0)\r\nSystemExit: 0\r\n[2019-11-29 01:06:56 -0700] [53812] [INFO] Shutting down: Master\r\n\n", "before_files": [{"content": "try:\n import asyncio\nexcept ImportError:\n raise RuntimeError(\"This example requries Python3 / asyncio\")\n\nfrom threading import Thread\n\nfrom flask import Flask, render_template\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.sea_surface_temperature import sea_surface_temperature\nfrom bokeh.server.server import BaseServer\nfrom bokeh.server.tornado import BokehTornado\nfrom bokeh.server.util import bind_sockets\nfrom bokeh.themes import Theme\n\nif __name__ == '__main__':\n print('This script is intended to be run with gunicorn. 
e.g.')\n print()\n print(' gunicorn -w 4 flask_gunicorn_embed:app')\n print()\n print('will start the app on four processes')\n import sys\n sys.exit()\n\n\napp = Flask(__name__)\n\ndef bkapp(doc):\n df = sea_surface_temperature.copy()\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource.from_df(data)\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(filename=\"theme.yaml\")\n\n# can't use shortcuts here, since we are passing to low level BokehTornado\nbkapp = Application(FunctionHandler(bkapp))\n\n# This is so that if this app is run using something like \"gunicorn -w 4\" then\n# each process will listen on its own port\nsockets, port = bind_sockets(\"localhost\", 0)\n\[email protected]('/', methods=['GET'])\ndef bkapp_page():\n script = server_document('http://localhost:%d/bkapp' % port)\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n\ndef bk_worker():\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n bokeh_http = HTTPServer(bokeh_tornado)\n bokeh_http.add_sockets(sockets)\n\n server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n\nThread(target=bk_worker).start()\n", "path": "examples/howto/server_embed/flask_gunicorn_embed.py"}], "after_files": [{"content": "try:\n import asyncio\nexcept ImportError:\n raise RuntimeError(\"This example requries Python3 / asyncio\")\n\nfrom threading import Thread\n\nfrom flask import Flask, render_template\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.sea_surface_temperature import sea_surface_temperature\nfrom bokeh.server.server import BaseServer\nfrom bokeh.server.tornado import BokehTornado\nfrom bokeh.server.util import bind_sockets\nfrom bokeh.themes import Theme\n\nif __name__ == '__main__':\n print('This script is intended to be run with gunicorn. 
e.g.')\n print()\n print(' gunicorn -w 4 flask_gunicorn_embed:app')\n print()\n print('will start the app on four processes')\n import sys\n sys.exit()\n\n\napp = Flask(__name__)\n\ndef bkapp(doc):\n df = sea_surface_temperature.copy()\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource.from_df(data)\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(filename=\"theme.yaml\")\n\n# can't use shortcuts here, since we are passing to low level BokehTornado\nbkapp = Application(FunctionHandler(bkapp))\n\n# This is so that if this app is run using something like \"gunicorn -w 4\" then\n# each process will listen on its own port\nsockets, port = bind_sockets(\"localhost\", 0)\n\[email protected]('/', methods=['GET'])\ndef bkapp_page():\n script = server_document('http://localhost:%d/bkapp' % port)\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n\ndef bk_worker():\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n bokeh_http = HTTPServer(bokeh_tornado)\n bokeh_http.add_sockets(sockets)\n\n server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n\nt = Thread(target=bk_worker)\nt.daemon = True\nt.start()\n", "path": "examples/howto/server_embed/flask_gunicorn_embed.py"}]} | 3,126 | 105 |
gh_patches_debug_43357 | rasdani/github-patches | git_diff | streamlink__streamlink-4840 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.rtve: ZTNR.translate() runs endlessly
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
The RTVE plugin does not resolve any streams and gets stuck after the message "Found matching plugin rtve for URL". Upon debugging in Python, the following while block appears to cause an endless loop: https://github.com/streamlink/streamlink/blob/master/src/streamlink/plugins/rtve.py#L111
Thanks for the good work!
Cheers.
### Debug log
```text
bin\streamlink.exe -l debug https://rtve.es/play/videos/directo/canales-lineales/24h
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.10.7
[cli][debug] Streamlink: 5.0.0
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.4.1
[cli][debug] Arguments:
[cli][debug] url=https://rtve.es/play/videos/directo/canales-lineales/24h
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin rtve for URL https://rtve.es/play/videos/directo/canales-lineales/24h
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/rtve.py`
Content:
```
1 """
2 $description Live TV channels and video on-demand service from RTVE, a Spanish public, state-owned broadcaster.
3 $url rtve.es
4 $type live, vod
5 $region Spain
6 """
7
8 import logging
9 import re
10 from base64 import b64decode
11 from io import BytesIO
12 from typing import Iterator, Sequence, Tuple
13 from urllib.parse import urlparse
14
15 from streamlink.plugin import Plugin, pluginargument, pluginmatcher
16 from streamlink.plugin.api import validate
17 from streamlink.stream.ffmpegmux import MuxedStream
18 from streamlink.stream.hls import HLSStream
19 from streamlink.stream.http import HTTPStream
20 from streamlink.utils.url import update_scheme
21
22 log = logging.getLogger(__name__)
23
24
25 class Base64Reader:
26 def __init__(self, data: str):
27 stream = BytesIO(b64decode(data))
28
29 def _iterate():
30 while True:
31 chunk = stream.read(1)
32 if len(chunk) == 0: # pragma: no cover
33 return
34 yield ord(chunk)
35
36 self._iterator: Iterator[int] = _iterate()
37
38 def read(self, num: int) -> Sequence[int]:
39 res = []
40 for _ in range(num):
41 item = next(self._iterator, None)
42 if item is None: # pragma: no cover
43 break
44 res.append(item)
45 return res
46
47 def skip(self, num: int) -> None:
48 self.read(num)
49
50 def read_chars(self, num: int) -> str:
51 return "".join(chr(item) for item in self.read(num))
52
53 def read_int(self) -> int:
54 a, b, c, d = self.read(4)
55 return a << 24 | b << 16 | c << 8 | d
56
57 def read_chunk(self) -> Tuple[str, Sequence[int]]:
58 size = self.read_int()
59 chunktype = self.read_chars(4)
60 chunkdata = self.read(size)
61 if len(chunkdata) != size: # pragma: no cover
62 raise ValueError("Invalid chunk length")
63 self.skip(4)
64 return chunktype, chunkdata
65
66
67 class ZTNR:
68 @staticmethod
69 def _get_alphabet(text: str) -> str:
70 res = []
71 j = 0
72 k = 0
73 for char in text:
74 if k > 0:
75 k -= 1
76 else:
77 res.append(char)
78 j = (j + 1) % 4
79 k = j
80 return "".join(res)
81
82 @staticmethod
83 def _get_url(text: str, alphabet: str) -> str:
84 res = []
85 j = 0
86 n = 0
87 k = 3
88 cont = 0
89 for char in text:
90 if j == 0:
91 n = int(char) * 10
92 j = 1
93 elif k > 0:
94 k -= 1
95 else:
96 res.append(alphabet[n + int(char)])
97 j = 0
98 k = cont % 4
99 cont += 1
100 return "".join(res)
101
102 @classmethod
103 def _get_source(cls, alphabet: str, data: str) -> str:
104 return cls._get_url(data, cls._get_alphabet(alphabet))
105
106 @classmethod
107 def translate(cls, data: str) -> Iterator[Tuple[str, str]]:
108 reader = Base64Reader(data.replace("\n", ""))
109 reader.skip(8)
110 chunk_type, chunk_data = reader.read_chunk()
111 while chunk_type != "IEND":
112 if chunk_type == "tEXt":
113 content = "".join(chr(item) for item in chunk_data if item > 0)
114 if "#" not in content or "%%" not in content: # pragma: no cover
115 continue
116 alphabet, content = content.split("#", 1)
117 quality, content = content.split("%%", 1)
118 yield quality, cls._get_source(alphabet, content)
119 chunk_type, chunk_data = reader.read_chunk()
120
121
122 @pluginmatcher(re.compile(
123 r"https?://(?:www\.)?rtve\.es/play/videos/.+"
124 ))
125 @pluginargument(
126 "mux-subtitles",
127 is_global=True,
128 )
129 class Rtve(Plugin):
130 URL_M3U8 = "https://ztnr.rtve.es/ztnr/{id}.m3u8"
131 URL_VIDEOS = "https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2"
132 URL_SUBTITLES = "https://www.rtve.es/api/videos/{id}/subtitulos.json"
133
134 def _get_streams(self):
135 self.id = self.session.http.get(self.url, schema=validate.Schema(
136 re.compile(r"\bdata-setup='({.+?})'", re.DOTALL),
137 validate.none_or_all(
138 validate.get(1),
139 validate.parse_json(),
140 {
141 "idAsset": validate.any(int, validate.all(str, validate.transform(int))),
142 },
143 validate.get("idAsset"),
144 ),
145 ))
146 if not self.id:
147 return
148
149 # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first
150 # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases
151 urls = self.session.http.get(
152 self.URL_VIDEOS.format(id=self.id),
153 schema=validate.Schema(
154 validate.transform(ZTNR.translate),
155 validate.transform(list),
156 [(str, validate.url())],
157 ),
158 )
159
160 # then fall back to self.URL_M3U8
161 if not urls:
162 url = self.URL_M3U8.format(id=self.id)
163 else:
164 url = next((url for _, url in urls if urlparse(url).path.endswith(".m3u8")), None)
165 if not url:
166 url = next((url for _, url in urls if urlparse(url).path.endswith(".mp4")), None)
167 if url:
168 yield "vod", HTTPStream(self.session, url)
169 return
170
171 streams = HLSStream.parse_variant_playlist(self.session, url).items()
172
173 if self.options.get("mux-subtitles"):
174 subs = self.session.http.get(
175 self.URL_SUBTITLES.format(id=self.id),
176 schema=validate.Schema(
177 validate.parse_json(),
178 {
179 "page": {
180 "items": [{
181 "lang": str,
182 "src": validate.url(),
183 }],
184 },
185 },
186 validate.get(("page", "items")),
187 ),
188 )
189 if subs:
190 subtitles = {
191 s["lang"]: HTTPStream(self.session, update_scheme("https://", s["src"], force=True))
192 for s in subs
193 }
194 for quality, stream in streams:
195 yield quality, MuxedStream(self.session, stream, subtitles=subtitles)
196 return
197
198 yield from streams
199
200
201 __plugin__ = Rtve
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/rtve.py b/src/streamlink/plugins/rtve.py
--- a/src/streamlink/plugins/rtve.py
+++ b/src/streamlink/plugins/rtve.py
@@ -12,7 +12,7 @@
from typing import Iterator, Sequence, Tuple
from urllib.parse import urlparse
-from streamlink.plugin import Plugin, pluginargument, pluginmatcher
+from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.ffmpegmux import MuxedStream
from streamlink.stream.hls import HLSStream
@@ -29,7 +29,7 @@
def _iterate():
while True:
chunk = stream.read(1)
- if len(chunk) == 0: # pragma: no cover
+ if len(chunk) == 0:
return
yield ord(chunk)
@@ -39,7 +39,7 @@
res = []
for _ in range(num):
item = next(self._iterator, None)
- if item is None: # pragma: no cover
+ if item is None:
break
res.append(item)
return res
@@ -63,6 +63,14 @@
self.skip(4)
return chunktype, chunkdata
+ def __iter__(self):
+ self.skip(8)
+ while True:
+ try:
+ yield self.read_chunk()
+ except ValueError:
+ return
+
class ZTNR:
@staticmethod
@@ -106,17 +114,16 @@
@classmethod
def translate(cls, data: str) -> Iterator[Tuple[str, str]]:
reader = Base64Reader(data.replace("\n", ""))
- reader.skip(8)
- chunk_type, chunk_data = reader.read_chunk()
- while chunk_type != "IEND":
+ for chunk_type, chunk_data in reader:
+ if chunk_type == "IEND":
+ break
if chunk_type == "tEXt":
content = "".join(chr(item) for item in chunk_data if item > 0)
- if "#" not in content or "%%" not in content: # pragma: no cover
+ if "#" not in content or "%%" not in content:
continue
alphabet, content = content.split("#", 1)
quality, content = content.split("%%", 1)
yield quality, cls._get_source(alphabet, content)
- chunk_type, chunk_data = reader.read_chunk()
@pluginmatcher(re.compile(
@@ -147,18 +154,19 @@
return
# check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first
- # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases
- urls = self.session.http.get(
- self.URL_VIDEOS.format(id=self.id),
- schema=validate.Schema(
- validate.transform(ZTNR.translate),
- validate.transform(list),
- [(str, validate.url())],
- ),
- )
-
- # then fall back to self.URL_M3U8
- if not urls:
+ # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in some cases
+ try:
+ urls = self.session.http.get(
+ self.URL_VIDEOS.format(id=self.id),
+ schema=validate.Schema(
+ validate.transform(ZTNR.translate),
+ validate.transform(list),
+ [(str, validate.url())],
+ validate.length(1),
+ ),
+ )
+ except PluginError:
+ # catch HTTP errors and validation errors, and fall back to generic HLS URL template
url = self.URL_M3U8.format(id=self.id)
else:
url = next((url for _, url in urls if urlparse(url).path.endswith(".m3u8")), None)
| {"golden_diff": "diff --git a/src/streamlink/plugins/rtve.py b/src/streamlink/plugins/rtve.py\n--- a/src/streamlink/plugins/rtve.py\n+++ b/src/streamlink/plugins/rtve.py\n@@ -12,7 +12,7 @@\n from typing import Iterator, Sequence, Tuple\n from urllib.parse import urlparse\n \n-from streamlink.plugin import Plugin, pluginargument, pluginmatcher\n+from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.ffmpegmux import MuxedStream\n from streamlink.stream.hls import HLSStream\n@@ -29,7 +29,7 @@\n def _iterate():\n while True:\n chunk = stream.read(1)\n- if len(chunk) == 0: # pragma: no cover\n+ if len(chunk) == 0:\n return\n yield ord(chunk)\n \n@@ -39,7 +39,7 @@\n res = []\n for _ in range(num):\n item = next(self._iterator, None)\n- if item is None: # pragma: no cover\n+ if item is None:\n break\n res.append(item)\n return res\n@@ -63,6 +63,14 @@\n self.skip(4)\n return chunktype, chunkdata\n \n+ def __iter__(self):\n+ self.skip(8)\n+ while True:\n+ try:\n+ yield self.read_chunk()\n+ except ValueError:\n+ return\n+\n \n class ZTNR:\n @staticmethod\n@@ -106,17 +114,16 @@\n @classmethod\n def translate(cls, data: str) -> Iterator[Tuple[str, str]]:\n reader = Base64Reader(data.replace(\"\\n\", \"\"))\n- reader.skip(8)\n- chunk_type, chunk_data = reader.read_chunk()\n- while chunk_type != \"IEND\":\n+ for chunk_type, chunk_data in reader:\n+ if chunk_type == \"IEND\":\n+ break\n if chunk_type == \"tEXt\":\n content = \"\".join(chr(item) for item in chunk_data if item > 0)\n- if \"#\" not in content or \"%%\" not in content: # pragma: no cover\n+ if \"#\" not in content or \"%%\" not in content:\n continue\n alphabet, content = content.split(\"#\", 1)\n quality, content = content.split(\"%%\", 1)\n yield quality, cls._get_source(alphabet, content)\n- chunk_type, chunk_data = reader.read_chunk()\n \n \n @pluginmatcher(re.compile(\n@@ -147,18 +154,19 @@\n return\n \n # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first\n- # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases\n- urls = self.session.http.get(\n- self.URL_VIDEOS.format(id=self.id),\n- schema=validate.Schema(\n- validate.transform(ZTNR.translate),\n- validate.transform(list),\n- [(str, validate.url())],\n- ),\n- )\n-\n- # then fall back to self.URL_M3U8\n- if not urls:\n+ # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in some cases\n+ try:\n+ urls = self.session.http.get(\n+ self.URL_VIDEOS.format(id=self.id),\n+ schema=validate.Schema(\n+ validate.transform(ZTNR.translate),\n+ validate.transform(list),\n+ [(str, validate.url())],\n+ validate.length(1),\n+ ),\n+ )\n+ except PluginError:\n+ # catch HTTP errors and validation errors, and fall back to generic HLS URL template\n url = self.URL_M3U8.format(id=self.id)\n else:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".m3u8\")), None)\n", "issue": "plugins.rtve: ZTNR.translate() runs endlessly\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master 
branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nRTVE plugin is not resolving any streams and stuck after message \"Found matching plugin rtve for URL\". Upon debugging in Python the following while block seems to cause an endless loop: https://github.com/streamlink/streamlink/blob/master/src/streamlink/plugins/rtve.py#L111\r\n\r\nThanks for the good work!\r\n \r\nCheers.\n\n### Debug log\n\n```text\nbin\\streamlink.exe -l debug https://rtve.es/play/videos/directo/canales-lineales/24h\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.10.7\r\n[cli][debug] Streamlink: 5.0.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://rtve.es/play/videos/directo/canales-lineales/24h\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin rtve for URL https://rtve.es/play/videos/directo/canales-lineales/24h\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from RTVE, a Spanish public, state-owned broadcaster.\n$url rtve.es\n$type live, vod\n$region Spain\n\"\"\"\n\nimport logging\nimport re\nfrom base64 import b64decode\nfrom io import BytesIO\nfrom typing import Iterator, Sequence, Tuple\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.ffmpegmux import MuxedStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.utils.url import update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass Base64Reader:\n def __init__(self, data: str):\n stream = BytesIO(b64decode(data))\n\n def _iterate():\n while True:\n chunk = stream.read(1)\n if len(chunk) == 0: # pragma: no cover\n return\n yield ord(chunk)\n\n self._iterator: Iterator[int] = _iterate()\n\n def read(self, num: int) -> Sequence[int]:\n res = []\n for _ in range(num):\n item = next(self._iterator, None)\n if item is None: # pragma: no cover\n break\n res.append(item)\n return res\n\n def skip(self, num: int) -> None:\n self.read(num)\n\n def read_chars(self, num: int) -> str:\n return \"\".join(chr(item) for item in self.read(num))\n\n def read_int(self) -> int:\n a, b, c, d = self.read(4)\n return a << 24 | b << 16 | c << 8 | d\n\n def read_chunk(self) -> Tuple[str, Sequence[int]]:\n size = self.read_int()\n chunktype = self.read_chars(4)\n chunkdata = self.read(size)\n if len(chunkdata) != size: # pragma: no cover\n raise ValueError(\"Invalid chunk length\")\n self.skip(4)\n return chunktype, chunkdata\n\n\nclass ZTNR:\n @staticmethod\n def _get_alphabet(text: str) -> str:\n res = []\n j = 0\n k = 0\n for char in text:\n if k > 0:\n k -= 1\n else:\n res.append(char)\n j = (j + 1) % 4\n k = j\n return \"\".join(res)\n\n @staticmethod\n def _get_url(text: str, alphabet: str) -> str:\n res = []\n j = 0\n n = 0\n k = 3\n cont = 0\n for char in text:\n if j == 0:\n n = int(char) * 10\n j = 1\n elif k > 0:\n k -= 1\n else:\n res.append(alphabet[n + int(char)])\n j = 0\n k = cont % 4\n cont += 1\n return \"\".join(res)\n\n @classmethod\n def _get_source(cls, alphabet: str, data: str) -> str:\n return cls._get_url(data, 
cls._get_alphabet(alphabet))\n\n @classmethod\n def translate(cls, data: str) -> Iterator[Tuple[str, str]]:\n reader = Base64Reader(data.replace(\"\\n\", \"\"))\n reader.skip(8)\n chunk_type, chunk_data = reader.read_chunk()\n while chunk_type != \"IEND\":\n if chunk_type == \"tEXt\":\n content = \"\".join(chr(item) for item in chunk_data if item > 0)\n if \"#\" not in content or \"%%\" not in content: # pragma: no cover\n continue\n alphabet, content = content.split(\"#\", 1)\n quality, content = content.split(\"%%\", 1)\n yield quality, cls._get_source(alphabet, content)\n chunk_type, chunk_data = reader.read_chunk()\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?rtve\\.es/play/videos/.+\"\n))\n@pluginargument(\n \"mux-subtitles\",\n is_global=True,\n)\nclass Rtve(Plugin):\n URL_M3U8 = \"https://ztnr.rtve.es/ztnr/{id}.m3u8\"\n URL_VIDEOS = \"https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2\"\n URL_SUBTITLES = \"https://www.rtve.es/api/videos/{id}/subtitulos.json\"\n\n def _get_streams(self):\n self.id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\\bdata-setup='({.+?})'\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\n \"idAsset\": validate.any(int, validate.all(str, validate.transform(int))),\n },\n validate.get(\"idAsset\"),\n ),\n ))\n if not self.id:\n return\n\n # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first\n # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in same cases\n urls = self.session.http.get(\n self.URL_VIDEOS.format(id=self.id),\n schema=validate.Schema(\n validate.transform(ZTNR.translate),\n validate.transform(list),\n [(str, validate.url())],\n ),\n )\n\n # then fall back to self.URL_M3U8\n if not urls:\n url = self.URL_M3U8.format(id=self.id)\n else:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".m3u8\")), None)\n if not url:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".mp4\")), None)\n if url:\n yield \"vod\", HTTPStream(self.session, url)\n return\n\n streams = HLSStream.parse_variant_playlist(self.session, url).items()\n\n if self.options.get(\"mux-subtitles\"):\n subs = self.session.http.get(\n self.URL_SUBTITLES.format(id=self.id),\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"page\": {\n \"items\": [{\n \"lang\": str,\n \"src\": validate.url(),\n }],\n },\n },\n validate.get((\"page\", \"items\")),\n ),\n )\n if subs:\n subtitles = {\n s[\"lang\"]: HTTPStream(self.session, update_scheme(\"https://\", s[\"src\"], force=True))\n for s in subs\n }\n for quality, stream in streams:\n yield quality, MuxedStream(self.session, stream, subtitles=subtitles)\n return\n\n yield from streams\n\n\n__plugin__ = Rtve\n", "path": "src/streamlink/plugins/rtve.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels and video on-demand service from RTVE, a Spanish public, state-owned broadcaster.\n$url rtve.es\n$type live, vod\n$region Spain\n\"\"\"\n\nimport logging\nimport re\nfrom base64 import b64decode\nfrom io import BytesIO\nfrom typing import Iterator, Sequence, Tuple\nfrom urllib.parse import urlparse\n\nfrom streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.ffmpegmux import MuxedStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.utils.url import update_scheme\n\nlog = 
logging.getLogger(__name__)\n\n\nclass Base64Reader:\n def __init__(self, data: str):\n stream = BytesIO(b64decode(data))\n\n def _iterate():\n while True:\n chunk = stream.read(1)\n if len(chunk) == 0:\n return\n yield ord(chunk)\n\n self._iterator: Iterator[int] = _iterate()\n\n def read(self, num: int) -> Sequence[int]:\n res = []\n for _ in range(num):\n item = next(self._iterator, None)\n if item is None:\n break\n res.append(item)\n return res\n\n def skip(self, num: int) -> None:\n self.read(num)\n\n def read_chars(self, num: int) -> str:\n return \"\".join(chr(item) for item in self.read(num))\n\n def read_int(self) -> int:\n a, b, c, d = self.read(4)\n return a << 24 | b << 16 | c << 8 | d\n\n def read_chunk(self) -> Tuple[str, Sequence[int]]:\n size = self.read_int()\n chunktype = self.read_chars(4)\n chunkdata = self.read(size)\n if len(chunkdata) != size: # pragma: no cover\n raise ValueError(\"Invalid chunk length\")\n self.skip(4)\n return chunktype, chunkdata\n\n def __iter__(self):\n self.skip(8)\n while True:\n try:\n yield self.read_chunk()\n except ValueError:\n return\n\n\nclass ZTNR:\n @staticmethod\n def _get_alphabet(text: str) -> str:\n res = []\n j = 0\n k = 0\n for char in text:\n if k > 0:\n k -= 1\n else:\n res.append(char)\n j = (j + 1) % 4\n k = j\n return \"\".join(res)\n\n @staticmethod\n def _get_url(text: str, alphabet: str) -> str:\n res = []\n j = 0\n n = 0\n k = 3\n cont = 0\n for char in text:\n if j == 0:\n n = int(char) * 10\n j = 1\n elif k > 0:\n k -= 1\n else:\n res.append(alphabet[n + int(char)])\n j = 0\n k = cont % 4\n cont += 1\n return \"\".join(res)\n\n @classmethod\n def _get_source(cls, alphabet: str, data: str) -> str:\n return cls._get_url(data, cls._get_alphabet(alphabet))\n\n @classmethod\n def translate(cls, data: str) -> Iterator[Tuple[str, str]]:\n reader = Base64Reader(data.replace(\"\\n\", \"\"))\n for chunk_type, chunk_data in reader:\n if chunk_type == \"IEND\":\n break\n if chunk_type == \"tEXt\":\n content = \"\".join(chr(item) for item in chunk_data if item > 0)\n if \"#\" not in content or \"%%\" not in content:\n continue\n alphabet, content = content.split(\"#\", 1)\n quality, content = content.split(\"%%\", 1)\n yield quality, cls._get_source(alphabet, content)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?rtve\\.es/play/videos/.+\"\n))\n@pluginargument(\n \"mux-subtitles\",\n is_global=True,\n)\nclass Rtve(Plugin):\n URL_M3U8 = \"https://ztnr.rtve.es/ztnr/{id}.m3u8\"\n URL_VIDEOS = \"https://ztnr.rtve.es/ztnr/movil/thumbnail/rtveplayw/videos/{id}.png?q=v2\"\n URL_SUBTITLES = \"https://www.rtve.es/api/videos/{id}/subtitulos.json\"\n\n def _get_streams(self):\n self.id = self.session.http.get(self.url, schema=validate.Schema(\n re.compile(r\"\\bdata-setup='({.+?})'\", re.DOTALL),\n validate.none_or_all(\n validate.get(1),\n validate.parse_json(),\n {\n \"idAsset\": validate.any(int, validate.all(str, validate.transform(int))),\n },\n validate.get(\"idAsset\"),\n ),\n ))\n if not self.id:\n return\n\n # check obfuscated stream URLs via self.URL_VIDEOS and ZTNR.translate() first\n # self.URL_M3U8 appears to be valid for all streams, but doesn't provide any content in some cases\n try:\n urls = self.session.http.get(\n self.URL_VIDEOS.format(id=self.id),\n schema=validate.Schema(\n validate.transform(ZTNR.translate),\n validate.transform(list),\n [(str, validate.url())],\n validate.length(1),\n ),\n )\n except PluginError:\n # catch HTTP errors and validation errors, and fall back to generic HLS URL template\n url 
= self.URL_M3U8.format(id=self.id)\n else:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".m3u8\")), None)\n if not url:\n url = next((url for _, url in urls if urlparse(url).path.endswith(\".mp4\")), None)\n if url:\n yield \"vod\", HTTPStream(self.session, url)\n return\n\n streams = HLSStream.parse_variant_playlist(self.session, url).items()\n\n if self.options.get(\"mux-subtitles\"):\n subs = self.session.http.get(\n self.URL_SUBTITLES.format(id=self.id),\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"page\": {\n \"items\": [{\n \"lang\": str,\n \"src\": validate.url(),\n }],\n },\n },\n validate.get((\"page\", \"items\")),\n ),\n )\n if subs:\n subtitles = {\n s[\"lang\"]: HTTPStream(self.session, update_scheme(\"https://\", s[\"src\"], force=True))\n for s in subs\n }\n for quality, stream in streams:\n yield quality, MuxedStream(self.session, stream, subtitles=subtitles)\n return\n\n yield from streams\n\n\n__plugin__ = Rtve\n", "path": "src/streamlink/plugins/rtve.py"}]} | 2,782 | 877 |
gh_patches_debug_41609 | rasdani/github-patches | git_diff | getnikola__nikola-1292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
render fails if the theme has a code.css
The `conf.py` says:
```
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
```
I've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e), but then I can't render my site:
```
(blog)tin@morochita:~/lab/blog$ nikola build
Scanning posts.....done!
ERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.
(blog)tin@morochita:~/lab/blog$
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/copy_assets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 import codecs
28 import os
29
30 from nikola.plugin_categories import Task
31 from nikola import utils
32
33
34 class CopyAssets(Task):
35 """Copy theme assets into output."""
36
37 name = "copy_assets"
38
39 def gen_tasks(self):
40 """Create tasks to copy the assets of the whole theme chain.
41
42 If a file is present on two themes, use the version
43 from the "youngest" theme.
44 """
45
46 kw = {
47 "themes": self.site.THEMES,
48 "output_folder": self.site.config['OUTPUT_FOLDER'],
49 "filters": self.site.config['FILTERS'],
50 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
51 "code.css_selectors": 'pre.code',
52 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
53 }
54 has_code_css = False
55 tasks = {}
56 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
57
58 yield self.group_task()
59
60 for theme_name in kw['themes']:
61 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
62 dst = os.path.join(kw['output_folder'], 'assets')
63 for task in utils.copy_tree(src, dst):
64 if task['name'] in tasks:
65 continue
66 has_code_css = task['targets'][0] == code_css_path
67 tasks[task['name']] = task
68 task['uptodate'] = [utils.config_changed(kw)]
69 task['basename'] = self.name
70 yield utils.apply_filters(task, kw['filters'])
71
72 if not has_code_css: # Generate it
73
74 def create_code_css():
75 from pygments.formatters import get_formatter_by_name
76 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
77 utils.makedirs(os.path.dirname(code_css_path))
78 with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
79 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
80 outf.write(kw["code.css_close"])
81
82 task = {
83 'basename': self.name,
84 'name': code_css_path,
85 'targets': [code_css_path],
86 'uptodate': [utils.config_changed(kw)],
87 'actions': [(create_code_css, [])],
88 'clean': True,
89 }
90 yield utils.apply_filters(task, kw['filters'])
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -45,15 +45,21 @@
kw = {
"themes": self.site.THEMES,
+ "files_folders": self.site.config['FILES_FOLDERS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
"code.css_selectors": 'pre.code',
+ "code.css_head": '/* code.css file generated by Nikola */\n',
"code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
}
- has_code_css = False
tasks = {}
code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
+ code_css_input = utils.get_asset_path('assets/css/code.css',
+ themes=kw['themes'],
+ files_folders=kw['files_folders'])
+
+ kw["code.css_input"] = code_css_input
yield self.group_task()
@@ -63,27 +69,35 @@
for task in utils.copy_tree(src, dst):
if task['name'] in tasks:
continue
- has_code_css = task['targets'][0] == code_css_path
tasks[task['name']] = task
task['uptodate'] = [utils.config_changed(kw)]
task['basename'] = self.name
+ if code_css_input:
+ task['file_dep'] = [code_css_input]
yield utils.apply_filters(task, kw['filters'])
- if not has_code_css: # Generate it
-
+ # Check whether or not there is a code.css file around.
+ if not code_css_input:
def create_code_css():
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
+ outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
outf.write(kw["code.css_close"])
+ if os.path.exists(code_css_path):
+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:
+ testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
+ else:
+ testcontents = False
+
task = {
'basename': self.name,
'name': code_css_path,
'targets': [code_css_path],
- 'uptodate': [utils.config_changed(kw)],
+ 'uptodate': [utils.config_changed(kw), testcontents],
'actions': [(create_code_css, [])],
'clean': True,
}
| {"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -45,15 +45,21 @@\n \n kw = {\n \"themes\": self.site.THEMES,\n+ \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n+ \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n- has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n+ code_css_input = utils.get_asset_path('assets/css/code.css',\n+ themes=kw['themes'],\n+ files_folders=kw['files_folders'])\n+\n+ kw[\"code.css_input\"] = code_css_input\n \n yield self.group_task()\n \n@@ -63,27 +69,35 @@\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n- has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n+ if code_css_input:\n+ task['file_dep'] = [code_css_input]\n yield utils.apply_filters(task, kw['filters'])\n \n- if not has_code_css: # Generate it\n-\n+ # Check whether or not there is a code.css file around.\n+ if not code_css_input:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n+ outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n \n+ if os.path.exists(code_css_path):\n+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n+ testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n+ else:\n+ testcontents = False\n+\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n- 'uptodate': [utils.config_changed(kw)],\n+ 'uptodate': [utils.config_changed(kw), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n", "issue": "render fails if the theme has a code.css \nThe `conf.py` says: \n\n```\n# Color scheme to be used for code blocks. 
If your theme provides\n# \"assets/css/code.css\" this is ignored.\n```\n\nI've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site\n\n```\n(blog)tin@morochita:~/lab/blog$ nikola build\nScanning posts.....done!\nERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.\n(blog)tin@morochita:~/lab/blog$ \n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n\n kw = {\n \"themes\": self.site.THEMES,\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n has_code_css = False\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n\n yield self.group_task()\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n has_code_css = task['targets'][0] == code_css_path\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n yield utils.apply_filters(task, kw['filters'])\n\n if not has_code_css: # Generate it\n\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': 
[code_css_path],\n 'uptodate': [utils.config_changed(kw)],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n\n kw = {\n \"themes\": self.site.THEMES,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": 'pre.code',\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'])\n\n kw[\"code.css_input\"] = code_css_input\n\n yield self.group_task()\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n if task['name'] in tasks:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw)]\n task['basename'] = self.name\n if code_css_input:\n task['file_dep'] = [code_css_input]\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if 
os.path.exists(code_css_path):\n with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]} | 1,425 | 689 |
gh_patches_debug_35953 | rasdani/github-patches | git_diff | azavea__raster-vision-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More efficient chipping strategy
In `make_training_chips` for object detection, we create a chip for each object. This means that in a crowded area (like a parking lot), we create many chips that only vary slightly for that area. This is good from a data augmentation standpoint, since it's effectively adding random translations. But, we are also wasting time when training since so many of the images are so similar. This might explain why it takes us so much longer to train an object detection model than a classification model.
We can avoid this by using the same strategy except that we keep track of which objects have appeared entirely in a chip. If an object has already appeared in a chip, then we don't need to create a chip for it. That way, the parking lot (or similar) only gets into a few chips instead of 50.
--- END ISSUE ---
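The proposal above is essentially a greedy de-duplication over chip windows. A rough, self-contained sketch of the idea follows; the box representation and helper names here are assumptions for illustration and are not Raster Vision's API (the accepted patch further down does the same bookkeeping with `get_intersection(window, min_ioa=1.0)` and `Box.tuple_format()`).

```python
import random

# Boxes and windows are (ymin, xmin, ymax, xmax) tuples in pixel coordinates.

def box_within(box, window):
    """True if box lies entirely inside window."""
    bymin, bxmin, bymax, bxmax = box
    wymin, wxmin, wymax, wxmax = window
    return bymin >= wymin and bxmin >= wxmin and bymax <= wymax and bxmax <= wxmax

def random_window_containing(box, extent_h, extent_w, chip_size):
    """Pick a random chip_size square window that contains box (assumes it fits)."""
    bymin, bxmin, bymax, bxmax = box
    ymin = random.randint(max(0, int(bymax) - chip_size),
                          min(int(bymin), extent_h - chip_size))
    xmin = random.randint(max(0, int(bxmax) - chip_size),
                          min(int(bxmin), extent_w - chip_size))
    return (ymin, xmin, ymin + chip_size, xmin + chip_size)

def make_pos_windows(boxes, extent_h, extent_w, chip_size):
    windows = []
    covered = set()  # boxes already fully contained in an emitted window
    for box in boxes:
        if box in covered:
            continue
        window = random_window_containing(box, extent_h, extent_w, chip_size)
        windows.append(window)
        # Every box that happens to fall entirely inside this window is done,
        # so a crowded area like a parking lot yields only a few chips.
        covered.update(b for b in boxes if box_within(b, window))
    return windows
```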
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/rastervision/label_stores/utils.py`
Content:
```
1 import copy
2
3
4 def add_classes_to_geojson(geojson, class_map):
5 """Add missing class_names and class_ids from label GeoJSON."""
6 geojson = copy.deepcopy(geojson)
7 features = geojson['features']
8
9 for feature in features:
10 properties = feature.get('properties', {})
11 if 'class_id' not in properties:
12 if 'class_name' in properties:
13 properties['class_id'] = \
14 class_map.get_by_name(properties['class_name']).id
15 elif 'label' in properties:
16 # label is considered a synonym of class_name for now in order
17 # to interface with Raster Foundry.
18 properties['class_id'] = \
19 class_map.get_by_name(properties['label']).id
20 properties['class_name'] = properties['label']
21 else:
22 # if no class_id, class_name, or label, then just assume
23 # everything corresponds to class_id = 1.
24 class_id = 1
25 class_name = class_map.get_by_id(class_id).name
26 properties['class_id'] = class_id
27 properties['class_name'] = class_name
28
29 return geojson
30
```
Path: `src/rastervision/ml_tasks/object_detection.py`
Content:
```
1 import numpy as np
2
3 from object_detection.utils import visualization_utils as vis_util
4
5 from rastervision.core.ml_task import MLTask
6 from rastervision.evaluations.object_detection_evaluation import (
7 ObjectDetectionEvaluation)
8 from rastervision.utils.misc import save_img
9
10
11 def save_debug_image(im, labels, class_map, output_path):
12 npboxes = labels.get_npboxes()
13 class_ids = labels.get_class_ids()
14 scores = labels.get_scores()
15 if scores is None:
16 scores = [1.0] * len(labels)
17
18 vis_util.visualize_boxes_and_labels_on_image_array(
19 im, npboxes, class_ids, scores,
20 class_map.get_category_index(), use_normalized_coordinates=True,
21 line_thickness=2, max_boxes_to_draw=None)
22 save_img(im, output_path)
23
24
25 def _make_chip_pos_windows(image_extent, label_store, options):
26 chip_size = options.chip_size
27 pos_windows = []
28 for box in label_store.get_all_labels().get_boxes():
29 window = box.make_random_square_container(
30 image_extent.get_width(), image_extent.get_height(), chip_size)
31 pos_windows.append(window)
32
33 return pos_windows
34
35
36 def _make_label_pos_windows(image_extent, label_store, options):
37 label_buffer = options.object_detection_options.label_buffer
38 pos_windows = []
39 for box in label_store.get_all_labels().get_boxes():
40 window = box.make_buffer(label_buffer, image_extent)
41 pos_windows.append(window)
42
43 return pos_windows
44
45
46 def make_pos_windows(image_extent, label_store, options):
47 window_method = options.object_detection_options.window_method
48
49 if window_method == 'label':
50 return _make_label_pos_windows(image_extent, label_store, options)
51 elif window_method == 'image':
52 return [image_extent.make_copy()]
53 else:
54 return _make_chip_pos_windows(image_extent, label_store, options)
55
56
57 def make_neg_windows(raster_source, label_store, chip_size, nb_windows,
58 max_attempts):
59 extent = raster_source.get_extent()
60 neg_windows = []
61 for _ in range(max_attempts):
62 window = extent.make_random_square(chip_size)
63 chip = raster_source.get_chip(window)
64 labels = label_store.get_labels(
65 window, ioa_thresh=0.2)
66
67 # If no labels and not blank, append the chip
68 if len(labels) == 0 and np.sum(chip.ravel()) > 0:
69 neg_windows.append(window)
70
71 if len(neg_windows) == nb_windows:
72 break
73
74 return neg_windows
75
76
77 class ObjectDetection(MLTask):
78 def get_train_windows(self, scene, options):
79 raster_source = scene.raster_source
80 label_store = scene.ground_truth_label_store
81 # Make positive windows which contain labels.
82 pos_windows = make_pos_windows(
83 raster_source.get_extent(), label_store, options)
84 nb_pos_windows = len(pos_windows)
85
86 # Make negative windows which do not contain labels.
87 # Generate randow windows and save the ones that don't contain
88 # any labels. It may take many attempts to generate a single
89 # negative window, and could get into an infinite loop in some cases,
90 # so we cap the number of attempts.
91 if nb_pos_windows:
92 nb_neg_windows = round(
93 options.object_detection_options.neg_ratio * nb_pos_windows)
94 else:
95 nb_neg_windows = 100 # just make some
96 max_attempts = 100 * nb_neg_windows
97 neg_windows = make_neg_windows(
98 raster_source, label_store, options.chip_size,
99 nb_neg_windows, max_attempts)
100
101 return pos_windows + neg_windows
102
103 def get_train_labels(self, window, scene, options):
104 return scene.ground_truth_label_store.get_labels(
105 window, ioa_thresh=options.object_detection_options.ioa_thresh)
106
107 def get_predict_windows(self, extent, options):
108 chip_size = options.chip_size
109 stride = chip_size // 2
110 return extent.get_windows(chip_size, stride)
111
112 def get_evaluation(self):
113 return ObjectDetectionEvaluation()
114
115 def save_debug_predict_image(self, scene, debug_dir_uri):
116 # TODO implement this
117 pass
118
```
Path: `src/rastervision/labels/object_detection_labels.py`
Content:
```
1 import numpy as np
2
3 from object_detection.utils.np_box_list import BoxList
4 from object_detection.utils.np_box_list_ops import (
5 prune_non_overlapping_boxes, clip_to_window, change_coordinate_frame,
6 concatenate, scale, multi_class_non_max_suppression, _copy_extra_fields)
7
8 from rastervision.core.box import Box
9 from rastervision.core.labels import Labels
10 from rastervision.labels.utils import boxes_to_geojson
11
12
13 def geojson_to_labels(geojson, crs_transformer, extent):
14 """Extract boxes and related info from GeoJSON file."""
15 features = geojson['features']
16 boxes = []
17 class_ids = []
18 scores = []
19
20 for feature in features:
21 # Convert polygon to pixel coords and then convert to bounding box.
22 polygon = feature['geometry']['coordinates'][0]
23 polygon = [crs_transformer.web_to_pixel(p) for p in polygon]
24 xmin, ymin = np.min(polygon, axis=0)
25 xmax, ymax = np.max(polygon, axis=0)
26 boxes.append(Box(ymin, xmin, ymax, xmax))
27
28 properties = feature['properties']
29 class_ids.append(properties['class_id'])
30 scores.append(properties.get('score', 1.0))
31
32 boxes = np.array([box.npbox_format() for box in boxes], dtype=float)
33 class_ids = np.array(class_ids)
34 scores = np.array(scores)
35 labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
36 labels = labels.get_intersection(extent)
37 return labels
38
39
40 def inverse_change_coordinate_frame(boxlist, window):
41 scaled_boxlist = scale(boxlist, window.get_height(), window.get_width())
42 npboxes = np.round(scaled_boxlist.get())
43 npboxes += [window.ymin, window.xmin, window.ymin, window.xmin]
44 boxlist_new = BoxList(npboxes)
45 _copy_extra_fields(boxlist_new, boxlist)
46 return boxlist_new
47
48
49 class ObjectDetectionLabels(Labels):
50 def __init__(self, npboxes, class_ids, scores=None):
51 self.boxlist = BoxList(npboxes)
52 # This field name actually needs to be 'classes' to be able to use
53 # certain utility functions in the TF Object Detection API.
54 self.boxlist.add_field('classes', class_ids)
55 if scores is not None:
56 self.boxlist.add_field('scores', scores)
57
58 @staticmethod
59 def from_boxlist(boxlist):
60 scores = boxlist.get_field('scores') \
61 if boxlist.has_field('scores') else None
62 return ObjectDetectionLabels(
63 boxlist.get(), boxlist.get_field('classes'), scores)
64
65 @staticmethod
66 def from_geojson(geojson, crs_transformer, extent):
67 return geojson_to_labels(geojson, crs_transformer, extent)
68
69 @staticmethod
70 def make_empty():
71 npboxes = np.empty((0, 4))
72 labels = np.empty((0,))
73 scores = np.empty((0,))
74 return ObjectDetectionLabels(npboxes, labels, scores)
75
76 def get_subwindow(self, window, ioa_thresh=1.0):
77 """Returns boxes relative to window.
78
79 This returns the boxes that overlap enough with window, clipped to
80 the window and in relative coordinates that lie between 0 and 1.
81 """
82 window_npbox = window.npbox_format()
83 window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))
84 boxlist = prune_non_overlapping_boxes(
85 self.boxlist, window_boxlist, minoverlap=ioa_thresh)
86 boxlist = clip_to_window(boxlist, window_npbox)
87 boxlist = change_coordinate_frame(boxlist, window_npbox)
88 return ObjectDetectionLabels.from_boxlist(boxlist)
89
90 def get_boxes(self):
91 return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]
92
93 def get_intersection(self, window):
94 """Returns list of boxes that intersect with window.
95
96 Does not clip or perform coordinate transform.
97 """
98 window_npbox = window.npbox_format()
99 window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))
100 boxlist = prune_non_overlapping_boxes(
101 self.boxlist, window_boxlist, minoverlap=0.000001)
102 return ObjectDetectionLabels.from_boxlist(boxlist)
103
104 def get_coordinates(self):
105 return self.boxlist.get_coordinates()
106
107 def get_npboxes(self):
108 return self.boxlist.get()
109
110 def get_scores(self):
111 if self.boxlist.has_field('scores'):
112 return self.boxlist.get_field('scores')
113 return None
114
115 def get_class_ids(self):
116 return self.boxlist.get_field('classes')
117
118 def __len__(self):
119 return self.boxlist.get().shape[0]
120
121 def __str__(self):
122 return str(self.boxlist.get())
123
124 def concatenate(self, window, labels):
125 boxlist_new = concatenate([
126 self.boxlist,
127 inverse_change_coordinate_frame(labels.boxlist, window)])
128 return ObjectDetectionLabels.from_boxlist(boxlist_new)
129
130 def prune_duplicates(self, score_thresh, merge_thresh):
131 max_output_size = 1000000
132
133 # Create a copy of self.boxlist that has a 2D scores
134 # field with a column for each class which is required
135 # by the multi_class_non_max_suppression function. It's
136 # suprising that the scores field has to be in this form since
137 # I haven't seen other functions require that.
138 boxlist = BoxList(self.boxlist.get())
139 classes = self.boxlist.get_field('classes').astype(np.int32)
140 nb_boxes = classes.shape[0]
141 nb_classes = np.max(classes)
142 class_inds = classes - 1
143 scores_1d = self.boxlist.get_field('scores')
144 scores_2d = np.zeros((nb_boxes, nb_classes))
145 # Not sure how to vectorize this so just do for loop :(
146 for box_ind in range(nb_boxes):
147 scores_2d[box_ind, class_inds[box_ind]] = scores_1d[box_ind]
148 boxlist.add_field('scores', scores_2d)
149
150 pruned_boxlist = multi_class_non_max_suppression(
151 boxlist, score_thresh, merge_thresh, max_output_size)
152 # Add one because multi_class_nms outputs labels that start at zero
153 # instead of one like in the rest of the system.
154 class_ids = pruned_boxlist.get_field('classes')
155 class_ids += 1
156 return ObjectDetectionLabels.from_boxlist(pruned_boxlist)
157
158 def to_geojson(self, crs_transformer, class_map):
159 boxes = self.get_boxes()
160 class_ids = self.get_class_ids().tolist()
161 scores = self.get_scores().tolist()
162
163 return boxes_to_geojson(boxes, class_ids, crs_transformer, class_map,
164 scores=scores)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/rastervision/label_stores/utils.py b/src/rastervision/label_stores/utils.py
--- a/src/rastervision/label_stores/utils.py
+++ b/src/rastervision/label_stores/utils.py
@@ -26,4 +26,6 @@
properties['class_id'] = class_id
properties['class_name'] = class_name
+ feature['properties'] = properties
+
return geojson
diff --git a/src/rastervision/labels/object_detection_labels.py b/src/rastervision/labels/object_detection_labels.py
--- a/src/rastervision/labels/object_detection_labels.py
+++ b/src/rastervision/labels/object_detection_labels.py
@@ -90,7 +90,7 @@
def get_boxes(self):
return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]
- def get_intersection(self, window):
+ def get_intersection(self, window, min_ioa=0.000001):
"""Returns list of boxes that intersect with window.
Does not clip or perform coordinate transform.
@@ -98,7 +98,7 @@
window_npbox = window.npbox_format()
window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))
boxlist = prune_non_overlapping_boxes(
- self.boxlist, window_boxlist, minoverlap=0.000001)
+ self.boxlist, window_boxlist, minoverlap=min_ioa)
return ObjectDetectionLabels.from_boxlist(boxlist)
def get_coordinates(self):
diff --git a/src/rastervision/ml_tasks/object_detection.py b/src/rastervision/ml_tasks/object_detection.py
--- a/src/rastervision/ml_tasks/object_detection.py
+++ b/src/rastervision/ml_tasks/object_detection.py
@@ -25,10 +25,22 @@
def _make_chip_pos_windows(image_extent, label_store, options):
chip_size = options.chip_size
pos_windows = []
- for box in label_store.get_all_labels().get_boxes():
- window = box.make_random_square_container(
- image_extent.get_width(), image_extent.get_height(), chip_size)
- pos_windows.append(window)
+ boxes = label_store.get_all_labels().get_boxes()
+ done_boxes = set()
+
+ # Get a random window around each box. If a box was previously included
+ # in a window, then it is skipped.
+ for box in boxes:
+ if box.tuple_format() not in done_boxes:
+ window = box.make_random_square_container(
+ image_extent.get_width(), image_extent.get_height(), chip_size)
+ pos_windows.append(window)
+
+ # Get boxes that lie completely within window
+ window_boxes = label_store.get_all_labels().get_intersection(
+ window, min_ioa=1.0).get_boxes()
+ window_boxes = [box.tuple_format() for box in window_boxes]
+ done_boxes.update(window_boxes)
return pos_windows
| {"golden_diff": "diff --git a/src/rastervision/label_stores/utils.py b/src/rastervision/label_stores/utils.py\n--- a/src/rastervision/label_stores/utils.py\n+++ b/src/rastervision/label_stores/utils.py\n@@ -26,4 +26,6 @@\n properties['class_id'] = class_id\n properties['class_name'] = class_name\n \n+ feature['properties'] = properties\n+\n return geojson\ndiff --git a/src/rastervision/labels/object_detection_labels.py b/src/rastervision/labels/object_detection_labels.py\n--- a/src/rastervision/labels/object_detection_labels.py\n+++ b/src/rastervision/labels/object_detection_labels.py\n@@ -90,7 +90,7 @@\n def get_boxes(self):\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]\n \n- def get_intersection(self, window):\n+ def get_intersection(self, window, min_ioa=0.000001):\n \"\"\"Returns list of boxes that intersect with window.\n \n Does not clip or perform coordinate transform.\n@@ -98,7 +98,7 @@\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n- self.boxlist, window_boxlist, minoverlap=0.000001)\n+ self.boxlist, window_boxlist, minoverlap=min_ioa)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n \n def get_coordinates(self):\ndiff --git a/src/rastervision/ml_tasks/object_detection.py b/src/rastervision/ml_tasks/object_detection.py\n--- a/src/rastervision/ml_tasks/object_detection.py\n+++ b/src/rastervision/ml_tasks/object_detection.py\n@@ -25,10 +25,22 @@\n def _make_chip_pos_windows(image_extent, label_store, options):\n chip_size = options.chip_size\n pos_windows = []\n- for box in label_store.get_all_labels().get_boxes():\n- window = box.make_random_square_container(\n- image_extent.get_width(), image_extent.get_height(), chip_size)\n- pos_windows.append(window)\n+ boxes = label_store.get_all_labels().get_boxes()\n+ done_boxes = set()\n+\n+ # Get a random window around each box. If a box was previously included\n+ # in a window, then it is skipped.\n+ for box in boxes:\n+ if box.tuple_format() not in done_boxes:\n+ window = box.make_random_square_container(\n+ image_extent.get_width(), image_extent.get_height(), chip_size)\n+ pos_windows.append(window)\n+\n+ # Get boxes that lie completely within window\n+ window_boxes = label_store.get_all_labels().get_intersection(\n+ window, min_ioa=1.0).get_boxes()\n+ window_boxes = [box.tuple_format() for box in window_boxes]\n+ done_boxes.update(window_boxes)\n \n return pos_windows\n", "issue": "More efficient chipping strategy\nIn `make_training_chips` for object detection, we create a chip for each object. This means that in a crowded area (like a parking lot), we create many chips that only vary slightly for that area. This is good from a data augmentation standpoint, since it's effectively adding random translations. But, we are also wasting time when training since so many of the images are so similar. This might explain why it takes us so much longer to train an object detection model than a classification model. \r\n\r\nWe can avoid this by using the same strategy except that we keep track of which objects have appeared entirely in a chip. If an object has already appeared in a chip, then we don't need to create a chip for it. 
That way, the parking lot (or similar) only gets into a few chips instead of 50.\n", "before_files": [{"content": "import copy\n\n\ndef add_classes_to_geojson(geojson, class_map):\n \"\"\"Add missing class_names and class_ids from label GeoJSON.\"\"\"\n geojson = copy.deepcopy(geojson)\n features = geojson['features']\n\n for feature in features:\n properties = feature.get('properties', {})\n if 'class_id' not in properties:\n if 'class_name' in properties:\n properties['class_id'] = \\\n class_map.get_by_name(properties['class_name']).id\n elif 'label' in properties:\n # label is considered a synonym of class_name for now in order\n # to interface with Raster Foundry.\n properties['class_id'] = \\\n class_map.get_by_name(properties['label']).id\n properties['class_name'] = properties['label']\n else:\n # if no class_id, class_name, or label, then just assume\n # everything corresponds to class_id = 1.\n class_id = 1\n class_name = class_map.get_by_id(class_id).name\n properties['class_id'] = class_id\n properties['class_name'] = class_name\n\n return geojson\n", "path": "src/rastervision/label_stores/utils.py"}, {"content": "import numpy as np\n\nfrom object_detection.utils import visualization_utils as vis_util\n\nfrom rastervision.core.ml_task import MLTask\nfrom rastervision.evaluations.object_detection_evaluation import (\n ObjectDetectionEvaluation)\nfrom rastervision.utils.misc import save_img\n\n\ndef save_debug_image(im, labels, class_map, output_path):\n npboxes = labels.get_npboxes()\n class_ids = labels.get_class_ids()\n scores = labels.get_scores()\n if scores is None:\n scores = [1.0] * len(labels)\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n im, npboxes, class_ids, scores,\n class_map.get_category_index(), use_normalized_coordinates=True,\n line_thickness=2, max_boxes_to_draw=None)\n save_img(im, output_path)\n\n\ndef _make_chip_pos_windows(image_extent, label_store, options):\n chip_size = options.chip_size\n pos_windows = []\n for box in label_store.get_all_labels().get_boxes():\n window = box.make_random_square_container(\n image_extent.get_width(), image_extent.get_height(), chip_size)\n pos_windows.append(window)\n\n return pos_windows\n\n\ndef _make_label_pos_windows(image_extent, label_store, options):\n label_buffer = options.object_detection_options.label_buffer\n pos_windows = []\n for box in label_store.get_all_labels().get_boxes():\n window = box.make_buffer(label_buffer, image_extent)\n pos_windows.append(window)\n\n return pos_windows\n\n\ndef make_pos_windows(image_extent, label_store, options):\n window_method = options.object_detection_options.window_method\n\n if window_method == 'label':\n return _make_label_pos_windows(image_extent, label_store, options)\n elif window_method == 'image':\n return [image_extent.make_copy()]\n else:\n return _make_chip_pos_windows(image_extent, label_store, options)\n\n\ndef make_neg_windows(raster_source, label_store, chip_size, nb_windows,\n max_attempts):\n extent = raster_source.get_extent()\n neg_windows = []\n for _ in range(max_attempts):\n window = extent.make_random_square(chip_size)\n chip = raster_source.get_chip(window)\n labels = label_store.get_labels(\n window, ioa_thresh=0.2)\n\n # If no labels and not blank, append the chip\n if len(labels) == 0 and np.sum(chip.ravel()) > 0:\n neg_windows.append(window)\n\n if len(neg_windows) == nb_windows:\n break\n\n return neg_windows\n\n\nclass ObjectDetection(MLTask):\n def get_train_windows(self, scene, options):\n raster_source = scene.raster_source\n 
label_store = scene.ground_truth_label_store\n # Make positive windows which contain labels.\n pos_windows = make_pos_windows(\n raster_source.get_extent(), label_store, options)\n nb_pos_windows = len(pos_windows)\n\n # Make negative windows which do not contain labels.\n # Generate randow windows and save the ones that don't contain\n # any labels. It may take many attempts to generate a single\n # negative window, and could get into an infinite loop in some cases,\n # so we cap the number of attempts.\n if nb_pos_windows:\n nb_neg_windows = round(\n options.object_detection_options.neg_ratio * nb_pos_windows)\n else:\n nb_neg_windows = 100 # just make some\n max_attempts = 100 * nb_neg_windows\n neg_windows = make_neg_windows(\n raster_source, label_store, options.chip_size,\n nb_neg_windows, max_attempts)\n\n return pos_windows + neg_windows\n\n def get_train_labels(self, window, scene, options):\n return scene.ground_truth_label_store.get_labels(\n window, ioa_thresh=options.object_detection_options.ioa_thresh)\n\n def get_predict_windows(self, extent, options):\n chip_size = options.chip_size\n stride = chip_size // 2\n return extent.get_windows(chip_size, stride)\n\n def get_evaluation(self):\n return ObjectDetectionEvaluation()\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n", "path": "src/rastervision/ml_tasks/object_detection.py"}, {"content": "import numpy as np\n\nfrom object_detection.utils.np_box_list import BoxList\nfrom object_detection.utils.np_box_list_ops import (\n prune_non_overlapping_boxes, clip_to_window, change_coordinate_frame,\n concatenate, scale, multi_class_non_max_suppression, _copy_extra_fields)\n\nfrom rastervision.core.box import Box\nfrom rastervision.core.labels import Labels\nfrom rastervision.labels.utils import boxes_to_geojson\n\n\ndef geojson_to_labels(geojson, crs_transformer, extent):\n \"\"\"Extract boxes and related info from GeoJSON file.\"\"\"\n features = geojson['features']\n boxes = []\n class_ids = []\n scores = []\n\n for feature in features:\n # Convert polygon to pixel coords and then convert to bounding box.\n polygon = feature['geometry']['coordinates'][0]\n polygon = [crs_transformer.web_to_pixel(p) for p in polygon]\n xmin, ymin = np.min(polygon, axis=0)\n xmax, ymax = np.max(polygon, axis=0)\n boxes.append(Box(ymin, xmin, ymax, xmax))\n\n properties = feature['properties']\n class_ids.append(properties['class_id'])\n scores.append(properties.get('score', 1.0))\n\n boxes = np.array([box.npbox_format() for box in boxes], dtype=float)\n class_ids = np.array(class_ids)\n scores = np.array(scores)\n labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)\n labels = labels.get_intersection(extent)\n return labels\n\n\ndef inverse_change_coordinate_frame(boxlist, window):\n scaled_boxlist = scale(boxlist, window.get_height(), window.get_width())\n npboxes = np.round(scaled_boxlist.get())\n npboxes += [window.ymin, window.xmin, window.ymin, window.xmin]\n boxlist_new = BoxList(npboxes)\n _copy_extra_fields(boxlist_new, boxlist)\n return boxlist_new\n\n\nclass ObjectDetectionLabels(Labels):\n def __init__(self, npboxes, class_ids, scores=None):\n self.boxlist = BoxList(npboxes)\n # This field name actually needs to be 'classes' to be able to use\n # certain utility functions in the TF Object Detection API.\n self.boxlist.add_field('classes', class_ids)\n if scores is not None:\n self.boxlist.add_field('scores', scores)\n\n @staticmethod\n def from_boxlist(boxlist):\n scores = 
boxlist.get_field('scores') \\\n if boxlist.has_field('scores') else None\n return ObjectDetectionLabels(\n boxlist.get(), boxlist.get_field('classes'), scores)\n\n @staticmethod\n def from_geojson(geojson, crs_transformer, extent):\n return geojson_to_labels(geojson, crs_transformer, extent)\n\n @staticmethod\n def make_empty():\n npboxes = np.empty((0, 4))\n labels = np.empty((0,))\n scores = np.empty((0,))\n return ObjectDetectionLabels(npboxes, labels, scores)\n\n def get_subwindow(self, window, ioa_thresh=1.0):\n \"\"\"Returns boxes relative to window.\n\n This returns the boxes that overlap enough with window, clipped to\n the window and in relative coordinates that lie between 0 and 1.\n \"\"\"\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n self.boxlist, window_boxlist, minoverlap=ioa_thresh)\n boxlist = clip_to_window(boxlist, window_npbox)\n boxlist = change_coordinate_frame(boxlist, window_npbox)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n def get_boxes(self):\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]\n\n def get_intersection(self, window):\n \"\"\"Returns list of boxes that intersect with window.\n\n Does not clip or perform coordinate transform.\n \"\"\"\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n self.boxlist, window_boxlist, minoverlap=0.000001)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n def get_coordinates(self):\n return self.boxlist.get_coordinates()\n\n def get_npboxes(self):\n return self.boxlist.get()\n\n def get_scores(self):\n if self.boxlist.has_field('scores'):\n return self.boxlist.get_field('scores')\n return None\n\n def get_class_ids(self):\n return self.boxlist.get_field('classes')\n\n def __len__(self):\n return self.boxlist.get().shape[0]\n\n def __str__(self):\n return str(self.boxlist.get())\n\n def concatenate(self, window, labels):\n boxlist_new = concatenate([\n self.boxlist,\n inverse_change_coordinate_frame(labels.boxlist, window)])\n return ObjectDetectionLabels.from_boxlist(boxlist_new)\n\n def prune_duplicates(self, score_thresh, merge_thresh):\n max_output_size = 1000000\n\n # Create a copy of self.boxlist that has a 2D scores\n # field with a column for each class which is required\n # by the multi_class_non_max_suppression function. 
It's\n # suprising that the scores field has to be in this form since\n # I haven't seen other functions require that.\n boxlist = BoxList(self.boxlist.get())\n classes = self.boxlist.get_field('classes').astype(np.int32)\n nb_boxes = classes.shape[0]\n nb_classes = np.max(classes)\n class_inds = classes - 1\n scores_1d = self.boxlist.get_field('scores')\n scores_2d = np.zeros((nb_boxes, nb_classes))\n # Not sure how to vectorize this so just do for loop :(\n for box_ind in range(nb_boxes):\n scores_2d[box_ind, class_inds[box_ind]] = scores_1d[box_ind]\n boxlist.add_field('scores', scores_2d)\n\n pruned_boxlist = multi_class_non_max_suppression(\n boxlist, score_thresh, merge_thresh, max_output_size)\n # Add one because multi_class_nms outputs labels that start at zero\n # instead of one like in the rest of the system.\n class_ids = pruned_boxlist.get_field('classes')\n class_ids += 1\n return ObjectDetectionLabels.from_boxlist(pruned_boxlist)\n\n def to_geojson(self, crs_transformer, class_map):\n boxes = self.get_boxes()\n class_ids = self.get_class_ids().tolist()\n scores = self.get_scores().tolist()\n\n return boxes_to_geojson(boxes, class_ids, crs_transformer, class_map,\n scores=scores)\n", "path": "src/rastervision/labels/object_detection_labels.py"}], "after_files": [{"content": "import copy\n\n\ndef add_classes_to_geojson(geojson, class_map):\n \"\"\"Add missing class_names and class_ids from label GeoJSON.\"\"\"\n geojson = copy.deepcopy(geojson)\n features = geojson['features']\n\n for feature in features:\n properties = feature.get('properties', {})\n if 'class_id' not in properties:\n if 'class_name' in properties:\n properties['class_id'] = \\\n class_map.get_by_name(properties['class_name']).id\n elif 'label' in properties:\n # label is considered a synonym of class_name for now in order\n # to interface with Raster Foundry.\n properties['class_id'] = \\\n class_map.get_by_name(properties['label']).id\n properties['class_name'] = properties['label']\n else:\n # if no class_id, class_name, or label, then just assume\n # everything corresponds to class_id = 1.\n class_id = 1\n class_name = class_map.get_by_id(class_id).name\n properties['class_id'] = class_id\n properties['class_name'] = class_name\n\n feature['properties'] = properties\n\n return geojson\n", "path": "src/rastervision/label_stores/utils.py"}, {"content": "import numpy as np\n\nfrom object_detection.utils import visualization_utils as vis_util\n\nfrom rastervision.core.ml_task import MLTask\nfrom rastervision.evaluations.object_detection_evaluation import (\n ObjectDetectionEvaluation)\nfrom rastervision.utils.misc import save_img\n\n\ndef save_debug_image(im, labels, class_map, output_path):\n npboxes = labels.get_npboxes()\n class_ids = labels.get_class_ids()\n scores = labels.get_scores()\n if scores is None:\n scores = [1.0] * len(labels)\n\n vis_util.visualize_boxes_and_labels_on_image_array(\n im, npboxes, class_ids, scores,\n class_map.get_category_index(), use_normalized_coordinates=True,\n line_thickness=2, max_boxes_to_draw=None)\n save_img(im, output_path)\n\n\ndef _make_chip_pos_windows(image_extent, label_store, options):\n chip_size = options.chip_size\n pos_windows = []\n boxes = label_store.get_all_labels().get_boxes()\n done_boxes = set()\n\n # Get a random window around each box. 
If a box was previously included\n # in a window, then it is skipped.\n for box in boxes:\n if box.tuple_format() not in done_boxes:\n window = box.make_random_square_container(\n image_extent.get_width(), image_extent.get_height(), chip_size)\n pos_windows.append(window)\n\n # Get boxes that lie completely within window\n window_boxes = label_store.get_all_labels().get_intersection(\n window, min_ioa=1.0).get_boxes()\n window_boxes = [box.tuple_format() for box in window_boxes]\n done_boxes.update(window_boxes)\n\n return pos_windows\n\n\ndef _make_label_pos_windows(image_extent, label_store, options):\n label_buffer = options.object_detection_options.label_buffer\n pos_windows = []\n for box in label_store.get_all_labels().get_boxes():\n window = box.make_buffer(label_buffer, image_extent)\n pos_windows.append(window)\n\n return pos_windows\n\n\ndef make_pos_windows(image_extent, label_store, options):\n window_method = options.object_detection_options.window_method\n\n if window_method == 'label':\n return _make_label_pos_windows(image_extent, label_store, options)\n elif window_method == 'image':\n return [image_extent.make_copy()]\n else:\n return _make_chip_pos_windows(image_extent, label_store, options)\n\n\ndef make_neg_windows(raster_source, label_store, chip_size, nb_windows,\n max_attempts):\n extent = raster_source.get_extent()\n neg_windows = []\n for _ in range(max_attempts):\n window = extent.make_random_square(chip_size)\n chip = raster_source.get_chip(window)\n labels = label_store.get_labels(\n window, ioa_thresh=0.2)\n\n # If no labels and not blank, append the chip\n if len(labels) == 0 and np.sum(chip.ravel()) > 0:\n neg_windows.append(window)\n\n if len(neg_windows) == nb_windows:\n break\n\n return neg_windows\n\n\nclass ObjectDetection(MLTask):\n def get_train_windows(self, scene, options):\n raster_source = scene.raster_source\n label_store = scene.ground_truth_label_store\n # Make positive windows which contain labels.\n pos_windows = make_pos_windows(\n raster_source.get_extent(), label_store, options)\n nb_pos_windows = len(pos_windows)\n\n # Make negative windows which do not contain labels.\n # Generate randow windows and save the ones that don't contain\n # any labels. 
It may take many attempts to generate a single\n # negative window, and could get into an infinite loop in some cases,\n # so we cap the number of attempts.\n if nb_pos_windows:\n nb_neg_windows = round(\n options.object_detection_options.neg_ratio * nb_pos_windows)\n else:\n nb_neg_windows = 100 # just make some\n max_attempts = 100 * nb_neg_windows\n neg_windows = make_neg_windows(\n raster_source, label_store, options.chip_size,\n nb_neg_windows, max_attempts)\n\n return pos_windows + neg_windows\n\n def get_train_labels(self, window, scene, options):\n return scene.ground_truth_label_store.get_labels(\n window, ioa_thresh=options.object_detection_options.ioa_thresh)\n\n def get_predict_windows(self, extent, options):\n chip_size = options.chip_size\n stride = chip_size // 2\n return extent.get_windows(chip_size, stride)\n\n def get_evaluation(self):\n return ObjectDetectionEvaluation()\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n", "path": "src/rastervision/ml_tasks/object_detection.py"}, {"content": "import numpy as np\n\nfrom object_detection.utils.np_box_list import BoxList\nfrom object_detection.utils.np_box_list_ops import (\n prune_non_overlapping_boxes, clip_to_window, change_coordinate_frame,\n concatenate, scale, multi_class_non_max_suppression, _copy_extra_fields)\n\nfrom rastervision.core.box import Box\nfrom rastervision.core.labels import Labels\nfrom rastervision.labels.utils import boxes_to_geojson\n\n\ndef geojson_to_labels(geojson, crs_transformer, extent):\n \"\"\"Extract boxes and related info from GeoJSON file.\"\"\"\n features = geojson['features']\n boxes = []\n class_ids = []\n scores = []\n\n for feature in features:\n # Convert polygon to pixel coords and then convert to bounding box.\n polygon = feature['geometry']['coordinates'][0]\n polygon = [crs_transformer.web_to_pixel(p) for p in polygon]\n xmin, ymin = np.min(polygon, axis=0)\n xmax, ymax = np.max(polygon, axis=0)\n boxes.append(Box(ymin, xmin, ymax, xmax))\n\n properties = feature['properties']\n class_ids.append(properties['class_id'])\n scores.append(properties.get('score', 1.0))\n\n boxes = np.array([box.npbox_format() for box in boxes], dtype=float)\n class_ids = np.array(class_ids)\n scores = np.array(scores)\n labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)\n labels = labels.get_intersection(extent)\n return labels\n\n\ndef inverse_change_coordinate_frame(boxlist, window):\n scaled_boxlist = scale(boxlist, window.get_height(), window.get_width())\n npboxes = np.round(scaled_boxlist.get())\n npboxes += [window.ymin, window.xmin, window.ymin, window.xmin]\n boxlist_new = BoxList(npboxes)\n _copy_extra_fields(boxlist_new, boxlist)\n return boxlist_new\n\n\nclass ObjectDetectionLabels(Labels):\n def __init__(self, npboxes, class_ids, scores=None):\n self.boxlist = BoxList(npboxes)\n # This field name actually needs to be 'classes' to be able to use\n # certain utility functions in the TF Object Detection API.\n self.boxlist.add_field('classes', class_ids)\n if scores is not None:\n self.boxlist.add_field('scores', scores)\n\n @staticmethod\n def from_boxlist(boxlist):\n scores = boxlist.get_field('scores') \\\n if boxlist.has_field('scores') else None\n return ObjectDetectionLabels(\n boxlist.get(), boxlist.get_field('classes'), scores)\n\n @staticmethod\n def from_geojson(geojson, crs_transformer, extent):\n return geojson_to_labels(geojson, crs_transformer, extent)\n\n @staticmethod\n def make_empty():\n npboxes = np.empty((0, 4))\n 
labels = np.empty((0,))\n scores = np.empty((0,))\n return ObjectDetectionLabels(npboxes, labels, scores)\n\n def get_subwindow(self, window, ioa_thresh=1.0):\n \"\"\"Returns boxes relative to window.\n\n This returns the boxes that overlap enough with window, clipped to\n the window and in relative coordinates that lie between 0 and 1.\n \"\"\"\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n self.boxlist, window_boxlist, minoverlap=ioa_thresh)\n boxlist = clip_to_window(boxlist, window_npbox)\n boxlist = change_coordinate_frame(boxlist, window_npbox)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n def get_boxes(self):\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]\n\n def get_intersection(self, window, min_ioa=0.000001):\n \"\"\"Returns list of boxes that intersect with window.\n\n Does not clip or perform coordinate transform.\n \"\"\"\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n self.boxlist, window_boxlist, minoverlap=min_ioa)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n def get_coordinates(self):\n return self.boxlist.get_coordinates()\n\n def get_npboxes(self):\n return self.boxlist.get()\n\n def get_scores(self):\n if self.boxlist.has_field('scores'):\n return self.boxlist.get_field('scores')\n return None\n\n def get_class_ids(self):\n return self.boxlist.get_field('classes')\n\n def __len__(self):\n return self.boxlist.get().shape[0]\n\n def __str__(self):\n return str(self.boxlist.get())\n\n def concatenate(self, window, labels):\n boxlist_new = concatenate([\n self.boxlist,\n inverse_change_coordinate_frame(labels.boxlist, window)])\n return ObjectDetectionLabels.from_boxlist(boxlist_new)\n\n def prune_duplicates(self, score_thresh, merge_thresh):\n max_output_size = 1000000\n\n # Create a copy of self.boxlist that has a 2D scores\n # field with a column for each class which is required\n # by the multi_class_non_max_suppression function. It's\n # suprising that the scores field has to be in this form since\n # I haven't seen other functions require that.\n boxlist = BoxList(self.boxlist.get())\n classes = self.boxlist.get_field('classes').astype(np.int32)\n nb_boxes = classes.shape[0]\n nb_classes = np.max(classes)\n class_inds = classes - 1\n scores_1d = self.boxlist.get_field('scores')\n scores_2d = np.zeros((nb_boxes, nb_classes))\n # Not sure how to vectorize this so just do for loop :(\n for box_ind in range(nb_boxes):\n scores_2d[box_ind, class_inds[box_ind]] = scores_1d[box_ind]\n boxlist.add_field('scores', scores_2d)\n\n pruned_boxlist = multi_class_non_max_suppression(\n boxlist, score_thresh, merge_thresh, max_output_size)\n # Add one because multi_class_nms outputs labels that start at zero\n # instead of one like in the rest of the system.\n class_ids = pruned_boxlist.get_field('classes')\n class_ids += 1\n return ObjectDetectionLabels.from_boxlist(pruned_boxlist)\n\n def to_geojson(self, crs_transformer, class_map):\n boxes = self.get_boxes()\n class_ids = self.get_class_ids().tolist()\n scores = self.get_scores().tolist()\n\n return boxes_to_geojson(boxes, class_ids, crs_transformer, class_map,\n scores=scores)\n", "path": "src/rastervision/labels/object_detection_labels.py"}]} | 3,822 | 671 |
gh_patches_debug_2437 | rasdani/github-patches | git_diff | urllib3__urllib3-2843 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flaky and pytest-memray incompatible
### Subject
```
______________________________________________________________________________________________________ TestHTTPProxyManager.test_forwarding_proxy_request_timeout[https-https-True] ______________________________________________________________________________________________________
Traceback (most recent call last):
File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 122, in wrapper
result: object | None = func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py", line 121, in wrapper
with Tracker(result_file):
File "src/memray/_memray.pyx", line 404, in memray._memray.Tracker.__enter__
RuntimeError: No more than one Tracker instance can be active at the same time
```
caused by a flaky test:
```
===Flaky Test Report===
test_forwarding_proxy_request_timeout[https-https-True] failed (1 runs remaining out of 2).
<class 'AssertionError'>
assert <class 'urllib3.exceptions.ProxyError'> == ReadTimeoutError
+ where <class 'urllib3.exceptions.ProxyError'> = type(ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)")))
+ where ProxyError('Unable to connect to proxy', ReadTimeoutError("HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)")) = MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))').reason
+ where MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\'Unable to connect to proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') = <ExceptionInfo MaxRetryError('HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Max retries exceeded with url: https://240.0.0.0 (Ca...proxy\', ReadTimeoutError("HTTPSConnectionPool(host=\'240.0.0.0\', port=443): Read timed out. (read timeout=0.01)")))') tblen=10>.value
[<TracebackEntry /home/graingert/projects/urllib3/test/with_dummyserver/test_proxy_poolmanager.py:484>]
test_forwarding_proxy_request_timeout[https-https-True] failed; it passed 0 out of the required 1 times.
<class 'RuntimeError'>
No more than one Tracker instance can be active at the same time
[<TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:122>, <TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:121>, <TracebackEntry src/memray/_memray.pyx:404>]
```
see also https://github.com/bloomberg/pytest-memray/issues/53
--- END ISSUE ---
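The traceback quoted above comes down to memray's constraint that only one `Tracker` can be active at a time; the flaky rerun appears to activate a second `Tracker` inside the pytest-memray wrapper. The standalone sketch below only illustrates that constraint — it assumes `memray` is installed, and the script and the `.bin` file names are illustrative, not part of the urllib3 repository:

```python
# Minimal sketch of the memray rule behind the RuntimeError above: a second
# Tracker cannot be activated while one is still live.
# Assumes the memray package is installed; the .bin paths are placeholders.
import memray

with memray.Tracker("first_run.bin"):
    try:
        with memray.Tracker("rerun.bin"):  # second activation while one is active
            pass
    except RuntimeError as exc:
        print(exc)  # "No more than one Tracker instance can be active at the same time"
```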
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4 import shutil
5 import subprocess
6 import sys
7
8 import nox
9
10 SOURCE_FILES = [
11 "docs/",
12 "dummyserver/",
13 "src/",
14 "test/",
15 "noxfile.py",
16 "setup.py",
17 ]
18
19
20 def tests_impl(
21 session: nox.Session,
22 extras: str = "socks,secure,brotli,zstd",
23 byte_string_comparisons: bool = True,
24 ) -> None:
25 # Install deps and the package itself.
26 session.install("-r", "dev-requirements.txt")
27 session.install(f".[{extras}]")
28
29 # Show the pip version.
30 session.run("pip", "--version")
31 # Print the Python version and bytesize.
32 session.run("python", "--version")
33 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
34 # Print OpenSSL information.
35 session.run("python", "-m", "OpenSSL.debug")
36
37 memray_supported = True
38 if sys.implementation.name != "cpython" or sys.version_info < (3, 8):
39 memray_supported = False # pytest-memray requires CPython 3.8+
40 elif sys.platform == "win32":
41 memray_supported = False
42
43 # Inspired from https://hynek.me/articles/ditch-codecov-python/
44 # We use parallel mode and then combine in a later CI step
45 session.run(
46 "python",
47 *(("-bb",) if byte_string_comparisons else ()),
48 "-m",
49 "coverage",
50 "run",
51 "--parallel-mode",
52 "-m",
53 "pytest",
54 *("--memray", "--hide-memray-summary") if memray_supported else (),
55 "-v",
56 "-ra",
57 f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
58 "--tb=native",
59 "--no-success-flaky-report",
60 "--durations=10",
61 "--strict-config",
62 "--strict-markers",
63 *(session.posargs or ("test/",)),
64 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
65 )
66
67
68 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"])
69 def test(session: nox.Session) -> None:
70 tests_impl(session)
71
72
73 @nox.session(python=["2.7"])
74 def unsupported_setup_py(session: nox.Session) -> None:
75 # Can't check both returncode and output with session.run
76 process = subprocess.run(
77 ["python", "setup.py", "install"],
78 env={**session.env},
79 text=True,
80 capture_output=True,
81 )
82 assert process.returncode == 1
83 print(process.stderr)
84 assert "Please use `python -m pip install .` instead." in process.stderr
85
86
87 @nox.session(python=["3"])
88 def test_brotlipy(session: nox.Session) -> None:
89 """Check that if 'brotlipy' is installed instead of 'brotli' or
90 'brotlicffi' that we still don't blow up.
91 """
92 session.install("brotlipy")
93 tests_impl(session, extras="socks,secure", byte_string_comparisons=False)
94
95
96 def git_clone(session: nox.Session, git_url: str) -> None:
97 """We either clone the target repository or if already exist
98 simply reset the state and pull.
99 """
100 expected_directory = git_url.split("/")[-1]
101
102 if expected_directory.endswith(".git"):
103 expected_directory = expected_directory[:-4]
104
105 if not os.path.isdir(expected_directory):
106 session.run("git", "clone", "--depth", "1", git_url, external=True)
107 else:
108 session.run(
109 "git", "-C", expected_directory, "reset", "--hard", "HEAD", external=True
110 )
111 session.run("git", "-C", expected_directory, "pull", external=True)
112
113
114 @nox.session()
115 def downstream_botocore(session: nox.Session) -> None:
116 root = os.getcwd()
117 tmp_dir = session.create_tmp()
118
119 session.cd(tmp_dir)
120 git_clone(session, "https://github.com/boto/botocore")
121 session.chdir("botocore")
122 for patch in [
123 "0001-Mark-100-Continue-tests-as-failing.patch",
124 "0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch",
125 ]:
126 session.run("git", "apply", f"{root}/ci/{patch}", external=True)
127 session.run("git", "rev-parse", "HEAD", external=True)
128 session.run("python", "scripts/ci/install")
129
130 session.cd(root)
131 session.install(".", silent=False)
132 session.cd(f"{tmp_dir}/botocore")
133
134 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
135 session.run("python", "scripts/ci/run-tests")
136
137
138 @nox.session()
139 def downstream_requests(session: nox.Session) -> None:
140 root = os.getcwd()
141 tmp_dir = session.create_tmp()
142
143 session.cd(tmp_dir)
144 git_clone(session, "https://github.com/psf/requests")
145 session.chdir("requests")
146 session.run(
147 "git", "apply", f"{root}/ci/0003-requests-removed-warnings.patch", external=True
148 )
149 session.run(
150 "git", "apply", f"{root}/ci/0004-requests-chunked-requests.patch", external=True
151 )
152 session.run("git", "rev-parse", "HEAD", external=True)
153 session.install(".[socks]", silent=False)
154 session.install("-r", "requirements-dev.txt", silent=False)
155
156 session.cd(root)
157 session.install(".", silent=False)
158 session.cd(f"{tmp_dir}/requests")
159
160 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
161 session.run("pytest", "tests")
162
163
164 @nox.session()
165 def format(session: nox.Session) -> None:
166 """Run code formatters."""
167 lint(session)
168
169
170 @nox.session
171 def lint(session: nox.Session) -> None:
172 session.install("pre-commit")
173 session.run("pre-commit", "run", "--all-files")
174
175 mypy(session)
176
177
178 @nox.session(python="3.8")
179 def mypy(session: nox.Session) -> None:
180 """Run mypy."""
181 session.install("-r", "mypy-requirements.txt")
182 session.run("mypy", "--version")
183 session.run(
184 "mypy",
185 "dummyserver",
186 "noxfile.py",
187 "src/urllib3",
188 "test",
189 )
190
191
192 @nox.session
193 def docs(session: nox.Session) -> None:
194 session.install("-r", "docs/requirements.txt")
195 session.install(".[socks,secure,brotli,zstd]")
196
197 session.chdir("docs")
198 if os.path.exists("_build"):
199 shutil.rmtree("_build")
200 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -56,7 +56,6 @@
"-ra",
f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
"--tb=native",
- "--no-success-flaky-report",
"--durations=10",
"--strict-config",
"--strict-markers",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -56,7 +56,6 @@\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n- \"--no-success-flaky-report\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n", "issue": "flaky and pytest-memray incompatible\n### Subject\r\n\r\n```\r\n______________________________________________________________________________________________________ TestHTTPProxyManager.test_forwarding_proxy_request_timeout[https-https-True] ______________________________________________________________________________________________________\r\nTraceback (most recent call last):\r\n File \"/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py\", line 122, in wrapper\r\n result: object | None = func(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py\", line 121, in wrapper\r\n with Tracker(result_file):\r\n File \"src/memray/_memray.pyx\", line 404, in memray._memray.Tracker.__enter__\r\nRuntimeError: No more than one Tracker instance can be active at the same time\r\n```\r\ncaused by a flaky test:\r\n```\r\n===Flaky Test Report===\r\n\r\ntest_forwarding_proxy_request_timeout[https-https-True] failed (1 runs remaining out of 2).\r\n <class 'AssertionError'>\r\n assert <class 'urllib3.exceptions.ProxyError'> == ReadTimeoutError\r\n + where <class 'urllib3.exceptions.ProxyError'> = type(ProxyError('Unable to connect to proxy', ReadTimeoutError(\"HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)\")))\r\n + where ProxyError('Unable to connect to proxy', ReadTimeoutError(\"HTTPSConnectionPool(host='240.0.0.0', port=443): Read timed out. (read timeout=0.01)\")) = MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\\'Unable to connect to proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. (read timeout=0.01)\")))').reason\r\n + where MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Caused by ProxyError(\\'Unable to connect to proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. (read timeout=0.01)\")))') = <ExceptionInfo MaxRetryError('HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Max retries exceeded with url: https://240.0.0.0 (Ca...proxy\\', ReadTimeoutError(\"HTTPSConnectionPool(host=\\'240.0.0.0\\', port=443): Read timed out. 
(read timeout=0.01)\")))') tblen=10>.value\r\n [<TracebackEntry /home/graingert/projects/urllib3/test/with_dummyserver/test_proxy_poolmanager.py:484>]\r\ntest_forwarding_proxy_request_timeout[https-https-True] failed; it passed 0 out of the required 1 times.\r\n <class 'RuntimeError'>\r\n No more than one Tracker instance can be active at the same time\r\n [<TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:122>, <TracebackEntry /home/graingert/projects/urllib3/.nox/test-3-11/lib/python3.11/site-packages/pytest_memray/plugin.py:121>, <TracebackEntry src/memray/_memray.pyx:404>]\r\n\r\n```\r\n\r\nsee also https://github.com/bloomberg/pytest-memray/issues/53\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n memray_supported = True\n if sys.implementation.name != \"cpython\" or sys.version_info < (3, 8):\n memray_supported = False # pytest-memray requires CPython 3.8+\n elif sys.platform == \"win32\":\n memray_supported = False\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n *(\"--memray\", \"--hide-memray-summary\") if memray_supported else (),\n \"-v\",\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n \"\"\"We either clone the target repository or if already exist\n simply reset the state and 
pull.\n \"\"\"\n expected_directory = git_url.split(\"/\")[-1]\n\n if expected_directory.endswith(\".git\"):\n expected_directory = expected_directory[:-4]\n\n if not os.path.isdir(expected_directory):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n else:\n session.run(\n \"git\", \"-C\", expected_directory, \"reset\", \"--hard\", \"HEAD\", external=True\n )\n session.run(\"git\", \"-C\", expected_directory, \"pull\", external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n for patch in [\n \"0001-Mark-100-Continue-tests-as-failing.patch\",\n \"0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch\",\n ]:\n session.run(\"git\", \"apply\", f\"{root}/ci/{patch}\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0003-requests-removed-warnings.patch\", external=True\n )\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0004-requests-chunked-requests.patch\", external=True\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", 
\"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n memray_supported = True\n if sys.implementation.name != \"cpython\" or sys.version_info < (3, 8):\n memray_supported = False # pytest-memray requires CPython 3.8+\n elif sys.platform == \"win32\":\n memray_supported = False\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n *(\"--memray\", \"--hide-memray-summary\") if memray_supported else (),\n \"-v\",\n \"-ra\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--durations=10\",\n \"--strict-config\",\n \"--strict-markers\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n \"\"\"We either clone the target repository or if already exist\n simply reset the state and pull.\n \"\"\"\n expected_directory = git_url.split(\"/\")[-1]\n\n if expected_directory.endswith(\".git\"):\n expected_directory = expected_directory[:-4]\n\n if not os.path.isdir(expected_directory):\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n else:\n session.run(\n \"git\", \"-C\", expected_directory, \"reset\", \"--hard\", \"HEAD\", external=True\n )\n session.run(\"git\", \"-C\", expected_directory, \"pull\", external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n for patch in [\n \"0001-Mark-100-Continue-tests-as-failing.patch\",\n \"0002-Stop-relying-on-removed-DEFAULT_CIPHERS.patch\",\n ]:\n session.run(\"git\", \"apply\", f\"{root}/ci/{patch}\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n 
session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0003-requests-removed-warnings.patch\", external=True\n )\n session.run(\n \"git\", \"apply\", f\"{root}/ci/0004-requests-chunked-requests.patch\", external=True\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]} | 3,214 | 99 |
gh_patches_debug_34527 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1694 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: source 'City of Karlsruhe' stopped working
### I Have A Problem With:
A specific source
### What's Your Problem
Release 1.44.0:
Due to changes on the website, the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.
I am starting to troubleshoot and will add my findings here.
### Source (if relevant)
karlsruhe_de
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [x] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py`
Content:
```
1 from datetime import datetime
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "City of Karlsruhe"
8 DESCRIPTION = "Source for City of Karlsruhe."
9 URL = "https://www.karlsruhe.de/"
10 TEST_CASES = {
11 "Östliche Rheinbrückenstraße 1": {
12 "street": "Östliche Rheinbrückenstraße",
13 "hnr": 1,
14 },
15 "Habichtweg 4": {"street": "Habichtweg", "hnr": 4},
16 "Machstraße 5": {"street": "Machstraße", "hnr": 5},
17 "Bernsteinstraße 10 ladeort 1": {
18 "street": "Bernsteinstraße",
19 "hnr": 10,
20 "ladeort": 1,
21 },
22 "Bernsteinstraße 10 ladeort 2": {
23 "street": "Bernsteinstraße",
24 "hnr": 10,
25 "ladeort": 2,
26 },
27 }
28
29
30 ICON_MAP = {
31 "Restmüll": "mdi:trash-can",
32 "Bioabfall": "mdi:leaf",
33 "Papier": "mdi:package-variant",
34 "Wertstoff": "mdi:recycle",
35 "Sperrmüllabholung": "mdi:wardrobe",
36 }
37
38
39 API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
40
41
42 class Source:
43 def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):
44 self._street: str = street
45 self._hnr: str | int = hnr
46 self._ladeort: int | None = ladeort
47 self.ics = ICS()
48
49 def fetch(self):
50 now = datetime.now()
51 error = None
52 for year in (now.year, now.year + 1, now.year - 1):
53 try:
54 return self.get_data(API_URL.format(year=year))
55 except Exception as e:
56 error = e
57 raise error
58
59 def get_data(self, url):
60 data = {
61 "strasse_n": self._street,
62 "hausnr": self._hnr,
63 "ical": "+iCalendar",
64 "ladeort": self._ladeort,
65 }
66 params = {"hausnr": self._hnr}
67
68 r = requests.post(url, data=data, params=params)
69 dates = self.ics.convert(r.text)
70
71 entries = []
72 for d in dates:
73 date, waste_type = d
74 waste_type = waste_type.split(",")[0]
75 icon = ICON_MAP.get(waste_type)
76 entries.append(Collection(date=date, t=waste_type, icon=icon))
77
78 return entries
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
@@ -1,9 +1,17 @@
from datetime import datetime
import requests
+import urllib3
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
+# With verify=True the POST fails due to a SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines areused to suppress the InsecureRequestWarning when using verify=False
+urllib3.disable_warnings()
+
TITLE = "City of Karlsruhe"
DESCRIPTION = "Source for City of Karlsruhe."
URL = "https://www.karlsruhe.de/"
@@ -36,7 +44,7 @@
}
-API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
+API_URL = "https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php"
class Source:
@@ -50,10 +58,11 @@
now = datetime.now()
error = None
for year in (now.year, now.year + 1, now.year - 1):
- try:
- return self.get_data(API_URL.format(year=year))
- except Exception as e:
- error = e
+ for i in (4, 6):
+ try:
+ return self.get_data(API_URL.format(year=year, i=i))
+ except Exception as e:
+ error = e
raise error
def get_data(self, url):
@@ -65,7 +74,7 @@
}
params = {"hausnr": self._hnr}
- r = requests.post(url, data=data, params=params)
+ r = requests.post(url, data=data, params=params, verify=False)
dates = self.ics.convert(r.text)
entries = []
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n@@ -1,9 +1,17 @@\n from datetime import datetime\n \n import requests\n+import urllib3\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n+# With verify=True the POST fails due to a SSLCertVerificationError.\n+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n+# These two lines areused to suppress the InsecureRequestWarning when using verify=False\n+urllib3.disable_warnings()\n+\n TITLE = \"City of Karlsruhe\"\n DESCRIPTION = \"Source for City of Karlsruhe.\"\n URL = \"https://www.karlsruhe.de/\"\n@@ -36,7 +44,7 @@\n }\n \n \n-API_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n+API_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n \n \n class Source:\n@@ -50,10 +58,11 @@\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n- try:\n- return self.get_data(API_URL.format(year=year))\n- except Exception as e:\n- error = e\n+ for i in (4, 6):\n+ try:\n+ return self.get_data(API_URL.format(year=year, i=i))\n+ except Exception as e:\n+ error = e\n raise error\n \n def get_data(self, url):\n@@ -65,7 +74,7 @@\n }\n params = {\"hausnr\": self._hnr}\n \n- r = requests.post(url, data=data, params=params)\n+ r = requests.post(url, data=data, params=params, verify=False)\n dates = self.ics.convert(r.text)\n \n entries = []\n", "issue": "[Bug]: source 'City of Karlsruhe' stopped working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nRelease 1.44.0:\r\nDue to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.\r\nI start troubleshooting and add my findings here.\n\n### Source (if relevant)\n\nkarlsruhe_de\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [x] Checked that the website of your service provider is still working\n- [x] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom waste_collection_schedule 
import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n \"hnr\": 1,\n },\n \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 1,\n },\n \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 2,\n },\n}\n\n\nICON_MAP = {\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Wertstoff\": \"mdi:recycle\",\n \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n self._street: str = street\n self._hnr: str | int = hnr\n self._ladeort: int | None = ladeort\n self.ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n try:\n return self.get_data(API_URL.format(year=year))\n except Exception as e:\n error = e\n raise error\n\n def get_data(self, url):\n data = {\n \"strasse_n\": self._street,\n \"hausnr\": self._hnr,\n \"ical\": \"+iCalendar\",\n \"ladeort\": self._ladeort,\n }\n params = {\"hausnr\": self._hnr}\n\n r = requests.post(url, data=data, params=params)\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n waste_type = waste_type.split(\",\")[0]\n icon = ICON_MAP.get(waste_type)\n entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport requests\nimport urllib3\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\n# With verify=True the POST fails due to a SSLCertVerificationError.\n# Using verify=False works, but is not ideal. 
The following links may provide a better way of dealing with this:\n# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n# These two lines areused to suppress the InsecureRequestWarning when using verify=False\nurllib3.disable_warnings()\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n \"hnr\": 1,\n },\n \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 1,\n },\n \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n \"street\": \"Bernsteinstra\u00dfe\",\n \"hnr\": 10,\n \"ladeort\": 2,\n },\n}\n\n\nICON_MAP = {\n \"Restm\u00fcll\": \"mdi:trash-can\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Wertstoff\": \"mdi:recycle\",\n \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n self._street: str = street\n self._hnr: str | int = hnr\n self._ladeort: int | None = ladeort\n self.ics = ICS()\n\n def fetch(self):\n now = datetime.now()\n error = None\n for year in (now.year, now.year + 1, now.year - 1):\n for i in (4, 6):\n try:\n return self.get_data(API_URL.format(year=year, i=i))\n except Exception as e:\n error = e\n raise error\n\n def get_data(self, url):\n data = {\n \"strasse_n\": self._street,\n \"hausnr\": self._hnr,\n \"ical\": \"+iCalendar\",\n \"ladeort\": self._ladeort,\n }\n params = {\"hausnr\": self._hnr}\n\n r = requests.post(url, data=data, params=params, verify=False)\n dates = self.ics.convert(r.text)\n\n entries = []\n for d in dates:\n date, waste_type = d\n waste_type = waste_type.split(\",\")[0]\n icon = ICON_MAP.get(waste_type)\n entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}]} | 1,364 | 546 |
gh_patches_debug_10893 | rasdani/github-patches | git_diff | conan-io__conan-4888 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remote lookup in offline mode
Hello,
Following up the small issue that I had during my demo at swampUP:
I was trying to create a package for which I had every build requirement recipe already exported and thus available.
I was expecting to get an error:
>no such package with current options/settings: **build requirement package**
However, it seems that Conan did try to check for this package in the remotes (and, with no internet access, this failed). The expected behavior would be for Conan to check the local cache after the remotes.
Passing `--build missing` to `conan create` did not fix the issue.
Using Conan v1.3.3
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/graph/graph_binaries.py`
Content:
```
1 import os
2
3 from conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,
4 BINARY_SKIP, BINARY_UPDATE,
5 RECIPE_EDITABLE, BINARY_EDITABLE,
6 RECIPE_CONSUMER, RECIPE_VIRTUAL)
7 from conans.errors import NoRemoteAvailable, NotFoundException,\
8 conanfile_exception_formatter
9 from conans.model.info import ConanInfo
10 from conans.model.manifest import FileTreeManifest
11 from conans.model.ref import PackageReference
12 from conans.util.files import is_dirty, rmdir
13
14
15 class GraphBinariesAnalyzer(object):
16
17 def __init__(self, cache, output, remote_manager):
18 self._cache = cache
19 self._out = output
20 self._remote_manager = remote_manager
21 self._registry = cache.registry
22
23 def _check_update(self, upstream_manifest, package_folder, output, node):
24 read_manifest = FileTreeManifest.load(package_folder)
25 if upstream_manifest != read_manifest:
26 if upstream_manifest.time > read_manifest.time:
27 output.warn("Current package is older than remote upstream one")
28 node.update_manifest = upstream_manifest
29 return True
30 else:
31 output.warn("Current package is newer than remote upstream one")
32
33 def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):
34 assert node.binary is None, "Node.binary should be None"
35 assert node.package_id is not None, "Node.package_id shouldn't be None"
36
37 ref, conanfile = node.ref, node.conanfile
38 pref = PackageReference(ref, node.package_id)
39
40 # Check that this same reference hasn't already been checked
41 previous_nodes = evaluated_nodes.get(pref)
42 if previous_nodes:
43 previous_nodes.append(node)
44 previous_node = previous_nodes[0]
45 node.binary = previous_node.binary
46 node.binary_remote = previous_node.binary_remote
47 node.prev = previous_node.prev
48 return
49 evaluated_nodes[pref] = [node]
50
51 output = conanfile.output
52
53 if node.recipe == RECIPE_EDITABLE:
54 node.binary = BINARY_EDITABLE
55 # TODO: PREV?
56 return
57
58 if build_mode.forced(conanfile, ref):
59 output.warn('Forced build from source')
60 node.binary = BINARY_BUILD
61 node.prev = None
62 return
63
64 package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)
65
66 # Check if dirty, to remove it
67 with self._cache.package_lock(pref):
68 assert node.recipe != RECIPE_EDITABLE, "Editable package shouldn't reach this code"
69 if is_dirty(package_folder):
70 output.warn("Package is corrupted, removing folder: %s" % package_folder)
71 rmdir(package_folder) # Do not remove if it is EDITABLE
72
73 if self._cache.config.revisions_enabled:
74 metadata = self._cache.package_layout(pref.ref).load_metadata()
75 rec_rev = metadata.packages[pref.id].recipe_revision
76 if rec_rev and rec_rev != node.ref.revision:
77 output.warn("The package {} doesn't belong "
78 "to the installed recipe revision, removing folder".format(pref))
79 rmdir(package_folder)
80
81 if remote_name:
82 remote = self._registry.remotes.get(remote_name)
83 else:
84 # If the remote_name is not given, follow the binary remote, or
85 # the recipe remote
86 # If it is defined it won't iterate (might change in conan2.0)
87 remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)
88 remotes = self._registry.remotes.list
89
90 if os.path.exists(package_folder):
91 if update:
92 if remote:
93 try:
94 tmp = self._remote_manager.get_package_manifest(pref, remote)
95 upstream_manifest, pref = tmp
96 except NotFoundException:
97 output.warn("Can't update, no package in remote")
98 except NoRemoteAvailable:
99 output.warn("Can't update, no remote defined")
100 else:
101 if self._check_update(upstream_manifest, package_folder, output, node):
102 node.binary = BINARY_UPDATE
103 node.prev = pref.revision # With revision
104 if build_mode.outdated:
105 info, pref = self._remote_manager.get_package_info(pref, remote)
106 package_hash = info.recipe_hash
107 elif remotes:
108 pass
109 else:
110 output.warn("Can't update, no remote defined")
111 if not node.binary:
112 node.binary = BINARY_CACHE
113 metadata = self._cache.package_layout(pref.ref).load_metadata()
114 node.prev = metadata.packages[pref.id].revision
115 package_hash = ConanInfo.load_from_package(package_folder).recipe_hash
116
117 else: # Binary does NOT exist locally
118 remote_info = None
119 if remote:
120 try:
121 remote_info, pref = self._remote_manager.get_package_info(pref, remote)
122 except NotFoundException:
123 pass
124
125 # If the "remote" came from the registry but the user didn't specified the -r, with
126 # revisions iterate all remotes
127 if not remote or (not remote_info and self._cache.config.revisions_enabled
128 and not remote_name):
129 for r in remotes:
130 try:
131 remote_info, pref = self._remote_manager.get_package_info(pref, r)
132 except NotFoundException:
133 pass
134 else:
135 if remote_info:
136 remote = r
137 break
138
139 if remote_info:
140 node.binary = BINARY_DOWNLOAD
141 node.prev = pref.revision
142 package_hash = remote_info.recipe_hash
143 else:
144 if build_mode.allowed(conanfile):
145 node.binary = BINARY_BUILD
146 else:
147 node.binary = BINARY_MISSING
148 node.prev = None
149
150 if build_mode.outdated:
151 if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):
152 local_recipe_hash = self._cache.package_layout(ref).recipe_manifest().summary_hash
153 if local_recipe_hash != package_hash:
154 output.info("Outdated package!")
155 node.binary = BINARY_BUILD
156 node.prev = None
157 else:
158 output.info("Package is up to date")
159
160 node.binary_remote = remote
161
162 @staticmethod
163 def _compute_package_id(node, default_package_id_mode):
164 conanfile = node.conanfile
165 neighbors = node.neighbors()
166 direct_reqs = [] # of PackageReference
167 indirect_reqs = set() # of PackageReference, avoid duplicates
168 for neighbor in neighbors:
169 ref, nconan = neighbor.ref, neighbor.conanfile
170 direct_reqs.append(neighbor.pref)
171 indirect_reqs.update(nconan.info.requires.refs())
172 conanfile.options.propagate_downstream(ref, nconan.info.full_options)
173 # Might be never used, but update original requirement, just in case
174 conanfile.requires[ref.name].ref = ref
175
176 # Make sure not duplicated
177 indirect_reqs.difference_update(direct_reqs)
178 # There might be options that are not upstream, backup them, might be
179 # for build-requires
180 conanfile.build_requires_options = conanfile.options.values
181 conanfile.options.clear_unused(indirect_reqs.union(direct_reqs))
182 conanfile.options.freeze()
183
184 conanfile.info = ConanInfo.create(conanfile.settings.values,
185 conanfile.options.values,
186 direct_reqs,
187 indirect_reqs,
188 default_package_id_mode=default_package_id_mode)
189
190 # Once we are done, call package_id() to narrow and change possible values
191 with conanfile_exception_formatter(str(conanfile), "package_id"):
192 conanfile.package_id()
193
194 info = conanfile.info
195 node.package_id = info.package_id()
196
197 def _handle_private(self, node):
198 if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE, BINARY_SKIP):
199 private_neighbours = node.private_neighbors()
200 for neigh in private_neighbours:
201 # Current closure contains own node to be skipped
202 for n in neigh.public_closure.values():
203 n.binary = BINARY_SKIP
204 self._handle_private(n)
205
206 def evaluate_graph(self, deps_graph, build_mode, update, remote_name):
207 default_package_id_mode = self._cache.config.default_package_id_mode
208 evaluated = deps_graph.evaluated
209 for node in deps_graph.ordered_iterate():
210 self._compute_package_id(node, default_package_id_mode)
211 if node.recipe in (RECIPE_CONSUMER, RECIPE_VIRTUAL):
212 continue
213 self._evaluate_node(node, build_mode, update, evaluated, remote_name)
214 self._handle_private(node)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/graph/graph_binaries.py b/conans/client/graph/graph_binaries.py
--- a/conans/client/graph/graph_binaries.py
+++ b/conans/client/graph/graph_binaries.py
@@ -121,6 +121,9 @@
remote_info, pref = self._remote_manager.get_package_info(pref, remote)
except NotFoundException:
pass
+ except Exception:
+ conanfile.output.error("Error downloading binary package: '{}'".format(pref))
+ raise
# If the "remote" came from the registry but the user didn't specified the -r, with
# revisions iterate all remotes
| {"golden_diff": "diff --git a/conans/client/graph/graph_binaries.py b/conans/client/graph/graph_binaries.py\n--- a/conans/client/graph/graph_binaries.py\n+++ b/conans/client/graph/graph_binaries.py\n@@ -121,6 +121,9 @@\n remote_info, pref = self._remote_manager.get_package_info(pref, remote)\n except NotFoundException:\n pass\n+ except Exception:\n+ conanfile.output.error(\"Error downloading binary package: '{}'\".format(pref))\n+ raise\n \n # If the \"remote\" came from the registry but the user didn't specified the -r, with\n # revisions iterate all remotes\n", "issue": "Remote lookup in offline mode \nHello,\r\n\r\nFollowing up the small issue that I had during my demo at swampUP:\r\n\r\nI was trying to create a package for which I had every build requirement recipe already exported and thus available.\r\n\r\nI was expecting to get an error:\r\n\r\n>no such package with current options/settings: **build requirement package**\r\n\r\nHowever, it seems that Conan did try to check this package in remotes (with no internet access, it failed). The expected behavior would be for Conan to check the local cache after remotes.\r\n\r\nPassing `--build missing` to `conan create` did not fix the issue.\r\n\r\nUsing Conan v1.3.3\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,\n BINARY_SKIP, BINARY_UPDATE,\n RECIPE_EDITABLE, BINARY_EDITABLE,\n RECIPE_CONSUMER, RECIPE_VIRTUAL)\nfrom conans.errors import NoRemoteAvailable, NotFoundException,\\\n conanfile_exception_formatter\nfrom conans.model.info import ConanInfo\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import is_dirty, rmdir\n\n\nclass GraphBinariesAnalyzer(object):\n\n def __init__(self, cache, output, remote_manager):\n self._cache = cache\n self._out = output\n self._remote_manager = remote_manager\n self._registry = cache.registry\n\n def _check_update(self, upstream_manifest, package_folder, output, node):\n read_manifest = FileTreeManifest.load(package_folder)\n if upstream_manifest != read_manifest:\n if upstream_manifest.time > read_manifest.time:\n output.warn(\"Current package is older than remote upstream one\")\n node.update_manifest = upstream_manifest\n return True\n else:\n output.warn(\"Current package is newer than remote upstream one\")\n\n def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):\n assert node.binary is None, \"Node.binary should be None\"\n assert node.package_id is not None, \"Node.package_id shouldn't be None\"\n\n ref, conanfile = node.ref, node.conanfile\n pref = PackageReference(ref, node.package_id)\n\n # Check that this same reference hasn't already been checked\n previous_nodes = evaluated_nodes.get(pref)\n if previous_nodes:\n previous_nodes.append(node)\n previous_node = previous_nodes[0]\n node.binary = previous_node.binary\n node.binary_remote = previous_node.binary_remote\n node.prev = previous_node.prev\n return\n evaluated_nodes[pref] = [node]\n\n output = conanfile.output\n\n if node.recipe == RECIPE_EDITABLE:\n node.binary = BINARY_EDITABLE\n # 
TODO: PREV?\n return\n\n if build_mode.forced(conanfile, ref):\n output.warn('Forced build from source')\n node.binary = BINARY_BUILD\n node.prev = None\n return\n\n package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)\n\n # Check if dirty, to remove it\n with self._cache.package_lock(pref):\n assert node.recipe != RECIPE_EDITABLE, \"Editable package shouldn't reach this code\"\n if is_dirty(package_folder):\n output.warn(\"Package is corrupted, removing folder: %s\" % package_folder)\n rmdir(package_folder) # Do not remove if it is EDITABLE\n\n if self._cache.config.revisions_enabled:\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n rec_rev = metadata.packages[pref.id].recipe_revision\n if rec_rev and rec_rev != node.ref.revision:\n output.warn(\"The package {} doesn't belong \"\n \"to the installed recipe revision, removing folder\".format(pref))\n rmdir(package_folder)\n\n if remote_name:\n remote = self._registry.remotes.get(remote_name)\n else:\n # If the remote_name is not given, follow the binary remote, or\n # the recipe remote\n # If it is defined it won't iterate (might change in conan2.0)\n remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)\n remotes = self._registry.remotes.list\n\n if os.path.exists(package_folder):\n if update:\n if remote:\n try:\n tmp = self._remote_manager.get_package_manifest(pref, remote)\n upstream_manifest, pref = tmp\n except NotFoundException:\n output.warn(\"Can't update, no package in remote\")\n except NoRemoteAvailable:\n output.warn(\"Can't update, no remote defined\")\n else:\n if self._check_update(upstream_manifest, package_folder, output, node):\n node.binary = BINARY_UPDATE\n node.prev = pref.revision # With revision\n if build_mode.outdated:\n info, pref = self._remote_manager.get_package_info(pref, remote)\n package_hash = info.recipe_hash\n elif remotes:\n pass\n else:\n output.warn(\"Can't update, no remote defined\")\n if not node.binary:\n node.binary = BINARY_CACHE\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n node.prev = metadata.packages[pref.id].revision\n package_hash = ConanInfo.load_from_package(package_folder).recipe_hash\n\n else: # Binary does NOT exist locally\n remote_info = None\n if remote:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, remote)\n except NotFoundException:\n pass\n\n # If the \"remote\" came from the registry but the user didn't specified the -r, with\n # revisions iterate all remotes\n if not remote or (not remote_info and self._cache.config.revisions_enabled\n and not remote_name):\n for r in remotes:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, r)\n except NotFoundException:\n pass\n else:\n if remote_info:\n remote = r\n break\n\n if remote_info:\n node.binary = BINARY_DOWNLOAD\n node.prev = pref.revision\n package_hash = remote_info.recipe_hash\n else:\n if build_mode.allowed(conanfile):\n node.binary = BINARY_BUILD\n else:\n node.binary = BINARY_MISSING\n node.prev = None\n\n if build_mode.outdated:\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):\n local_recipe_hash = self._cache.package_layout(ref).recipe_manifest().summary_hash\n if local_recipe_hash != package_hash:\n output.info(\"Outdated package!\")\n node.binary = BINARY_BUILD\n node.prev = None\n else:\n output.info(\"Package is up to date\")\n\n node.binary_remote = remote\n\n @staticmethod\n def _compute_package_id(node, default_package_id_mode):\n conanfile = node.conanfile\n 
neighbors = node.neighbors()\n direct_reqs = [] # of PackageReference\n indirect_reqs = set() # of PackageReference, avoid duplicates\n for neighbor in neighbors:\n ref, nconan = neighbor.ref, neighbor.conanfile\n direct_reqs.append(neighbor.pref)\n indirect_reqs.update(nconan.info.requires.refs())\n conanfile.options.propagate_downstream(ref, nconan.info.full_options)\n # Might be never used, but update original requirement, just in case\n conanfile.requires[ref.name].ref = ref\n\n # Make sure not duplicated\n indirect_reqs.difference_update(direct_reqs)\n # There might be options that are not upstream, backup them, might be\n # for build-requires\n conanfile.build_requires_options = conanfile.options.values\n conanfile.options.clear_unused(indirect_reqs.union(direct_reqs))\n conanfile.options.freeze()\n\n conanfile.info = ConanInfo.create(conanfile.settings.values,\n conanfile.options.values,\n direct_reqs,\n indirect_reqs,\n default_package_id_mode=default_package_id_mode)\n\n # Once we are done, call package_id() to narrow and change possible values\n with conanfile_exception_formatter(str(conanfile), \"package_id\"):\n conanfile.package_id()\n\n info = conanfile.info\n node.package_id = info.package_id()\n\n def _handle_private(self, node):\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE, BINARY_SKIP):\n private_neighbours = node.private_neighbors()\n for neigh in private_neighbours:\n # Current closure contains own node to be skipped\n for n in neigh.public_closure.values():\n n.binary = BINARY_SKIP\n self._handle_private(n)\n\n def evaluate_graph(self, deps_graph, build_mode, update, remote_name):\n default_package_id_mode = self._cache.config.default_package_id_mode\n evaluated = deps_graph.evaluated\n for node in deps_graph.ordered_iterate():\n self._compute_package_id(node, default_package_id_mode)\n if node.recipe in (RECIPE_CONSUMER, RECIPE_VIRTUAL):\n continue\n self._evaluate_node(node, build_mode, update, evaluated, remote_name)\n self._handle_private(node)\n", "path": "conans/client/graph/graph_binaries.py"}], "after_files": [{"content": "import os\n\nfrom conans.client.graph.graph import (BINARY_BUILD, BINARY_CACHE, BINARY_DOWNLOAD, BINARY_MISSING,\n BINARY_SKIP, BINARY_UPDATE,\n RECIPE_EDITABLE, BINARY_EDITABLE,\n RECIPE_CONSUMER, RECIPE_VIRTUAL)\nfrom conans.errors import NoRemoteAvailable, NotFoundException,\\\n conanfile_exception_formatter\nfrom conans.model.info import ConanInfo\nfrom conans.model.manifest import FileTreeManifest\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import is_dirty, rmdir\n\n\nclass GraphBinariesAnalyzer(object):\n\n def __init__(self, cache, output, remote_manager):\n self._cache = cache\n self._out = output\n self._remote_manager = remote_manager\n self._registry = cache.registry\n\n def _check_update(self, upstream_manifest, package_folder, output, node):\n read_manifest = FileTreeManifest.load(package_folder)\n if upstream_manifest != read_manifest:\n if upstream_manifest.time > read_manifest.time:\n output.warn(\"Current package is older than remote upstream one\")\n node.update_manifest = upstream_manifest\n return True\n else:\n output.warn(\"Current package is newer than remote upstream one\")\n\n def _evaluate_node(self, node, build_mode, update, evaluated_nodes, remote_name):\n assert node.binary is None, \"Node.binary should be None\"\n assert node.package_id is not None, \"Node.package_id shouldn't be None\"\n\n ref, conanfile = node.ref, node.conanfile\n pref = PackageReference(ref, 
node.package_id)\n\n # Check that this same reference hasn't already been checked\n previous_nodes = evaluated_nodes.get(pref)\n if previous_nodes:\n previous_nodes.append(node)\n previous_node = previous_nodes[0]\n node.binary = previous_node.binary\n node.binary_remote = previous_node.binary_remote\n node.prev = previous_node.prev\n return\n evaluated_nodes[pref] = [node]\n\n output = conanfile.output\n\n if node.recipe == RECIPE_EDITABLE:\n node.binary = BINARY_EDITABLE\n # TODO: PREV?\n return\n\n if build_mode.forced(conanfile, ref):\n output.warn('Forced build from source')\n node.binary = BINARY_BUILD\n node.prev = None\n return\n\n package_folder = self._cache.package(pref, short_paths=conanfile.short_paths)\n\n # Check if dirty, to remove it\n with self._cache.package_lock(pref):\n assert node.recipe != RECIPE_EDITABLE, \"Editable package shouldn't reach this code\"\n if is_dirty(package_folder):\n output.warn(\"Package is corrupted, removing folder: %s\" % package_folder)\n rmdir(package_folder) # Do not remove if it is EDITABLE\n\n if self._cache.config.revisions_enabled:\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n rec_rev = metadata.packages[pref.id].recipe_revision\n if rec_rev and rec_rev != node.ref.revision:\n output.warn(\"The package {} doesn't belong \"\n \"to the installed recipe revision, removing folder\".format(pref))\n rmdir(package_folder)\n\n if remote_name:\n remote = self._registry.remotes.get(remote_name)\n else:\n # If the remote_name is not given, follow the binary remote, or\n # the recipe remote\n # If it is defined it won't iterate (might change in conan2.0)\n remote = self._registry.prefs.get(pref) or self._registry.refs.get(ref)\n remotes = self._registry.remotes.list\n\n if os.path.exists(package_folder):\n if update:\n if remote:\n try:\n tmp = self._remote_manager.get_package_manifest(pref, remote)\n upstream_manifest, pref = tmp\n except NotFoundException:\n output.warn(\"Can't update, no package in remote\")\n except NoRemoteAvailable:\n output.warn(\"Can't update, no remote defined\")\n else:\n if self._check_update(upstream_manifest, package_folder, output, node):\n node.binary = BINARY_UPDATE\n node.prev = pref.revision # With revision\n if build_mode.outdated:\n info, pref = self._remote_manager.get_package_info(pref, remote)\n package_hash = info.recipe_hash\n elif remotes:\n pass\n else:\n output.warn(\"Can't update, no remote defined\")\n if not node.binary:\n node.binary = BINARY_CACHE\n metadata = self._cache.package_layout(pref.ref).load_metadata()\n node.prev = metadata.packages[pref.id].revision\n package_hash = ConanInfo.load_from_package(package_folder).recipe_hash\n\n else: # Binary does NOT exist locally\n remote_info = None\n if remote:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, remote)\n except NotFoundException:\n pass\n except Exception:\n conanfile.output.error(\"Error downloading binary package: '{}'\".format(pref))\n raise\n\n # If the \"remote\" came from the registry but the user didn't specified the -r, with\n # revisions iterate all remotes\n if not remote or (not remote_info and self._cache.config.revisions_enabled\n and not remote_name):\n for r in remotes:\n try:\n remote_info, pref = self._remote_manager.get_package_info(pref, r)\n except NotFoundException:\n pass\n else:\n if remote_info:\n remote = r\n break\n\n if remote_info:\n node.binary = BINARY_DOWNLOAD\n node.prev = pref.revision\n package_hash = remote_info.recipe_hash\n else:\n if 
build_mode.allowed(conanfile):\n node.binary = BINARY_BUILD\n else:\n node.binary = BINARY_MISSING\n node.prev = None\n\n if build_mode.outdated:\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE):\n local_recipe_hash = self._cache.package_layout(ref).recipe_manifest().summary_hash\n if local_recipe_hash != package_hash:\n output.info(\"Outdated package!\")\n node.binary = BINARY_BUILD\n node.prev = None\n else:\n output.info(\"Package is up to date\")\n\n node.binary_remote = remote\n\n @staticmethod\n def _compute_package_id(node, default_package_id_mode):\n conanfile = node.conanfile\n neighbors = node.neighbors()\n direct_reqs = [] # of PackageReference\n indirect_reqs = set() # of PackageReference, avoid duplicates\n for neighbor in neighbors:\n ref, nconan = neighbor.ref, neighbor.conanfile\n direct_reqs.append(neighbor.pref)\n indirect_reqs.update(nconan.info.requires.refs())\n conanfile.options.propagate_downstream(ref, nconan.info.full_options)\n # Might be never used, but update original requirement, just in case\n conanfile.requires[ref.name].ref = ref\n\n # Make sure not duplicated\n indirect_reqs.difference_update(direct_reqs)\n # There might be options that are not upstream, backup them, might be\n # for build-requires\n conanfile.build_requires_options = conanfile.options.values\n conanfile.options.clear_unused(indirect_reqs.union(direct_reqs))\n conanfile.options.freeze()\n\n conanfile.info = ConanInfo.create(conanfile.settings.values,\n conanfile.options.values,\n direct_reqs,\n indirect_reqs,\n default_package_id_mode=default_package_id_mode)\n\n # Once we are done, call package_id() to narrow and change possible values\n with conanfile_exception_formatter(str(conanfile), \"package_id\"):\n conanfile.package_id()\n\n info = conanfile.info\n node.package_id = info.package_id()\n\n def _handle_private(self, node):\n if node.binary in (BINARY_CACHE, BINARY_DOWNLOAD, BINARY_UPDATE, BINARY_SKIP):\n private_neighbours = node.private_neighbors()\n for neigh in private_neighbours:\n # Current closure contains own node to be skipped\n for n in neigh.public_closure.values():\n n.binary = BINARY_SKIP\n self._handle_private(n)\n\n def evaluate_graph(self, deps_graph, build_mode, update, remote_name):\n default_package_id_mode = self._cache.config.default_package_id_mode\n evaluated = deps_graph.evaluated\n for node in deps_graph.ordered_iterate():\n self._compute_package_id(node, default_package_id_mode)\n if node.recipe in (RECIPE_CONSUMER, RECIPE_VIRTUAL):\n continue\n self._evaluate_node(node, build_mode, update, evaluated, remote_name)\n self._handle_private(node)\n", "path": "conans/client/graph/graph_binaries.py"}]} | 2,877 | 140 |
gh_patches_debug_28800 | rasdani/github-patches | git_diff | quantumlib__Cirq-1674 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve error message if on_each gets a list
When you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**.
Maybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.
--- END ISSUE ---
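A minimal, dependency-free sketch of the flattening idea suggested in the issue (plain strings stand in for qubits, and `flatten_targets` is a hypothetical helper rather than part of Cirq):

```python
import collections.abc

def flatten_targets(*targets):
    # Recursively expand nested iterables of targets into a flat list;
    # strings are treated as single targets, not as iterables to unpack.
    flat = []
    for target in targets:
        if isinstance(target, collections.abc.Iterable) and not isinstance(target, str):
            flat.extend(flatten_targets(*target))
        else:
            flat.append(target)
    return flat

print(flatten_targets("q0", ["q1", ["q2"]]))  # ['q0', 'q1', 'q2']
```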
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/ops/gate_features.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Marker classes for indicating which additional features gates support.
16
17 For example: some gates are reversible, some have known matrices, etc.
18 """
19
20 import abc
21
22 from cirq.ops import op_tree, raw_types
23
24
25 class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
26 """Indicates operations should be equal under some qubit permutations."""
27
28 def qubit_index_to_equivalence_group_key(self, index: int) -> int:
29 """Returns a key that differs between non-interchangeable qubits."""
30 return 0
31
32
33 class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
34 """A gate that must be applied to exactly one qubit."""
35 def num_qubits(self) -> int:
36 return 1
37
38 def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
39 """Returns a list of operations apply this gate to each of the targets.
40
41 Args:
42 *targets: The qubits to apply this gate to.
43
44 Returns:
45 Operations applying this gate to the target qubits.
46
47 Raises:
48 ValueError if targets are not instances of Qid.
49 """
50 return [self.on(target) for target in targets]
51
52
53 class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
54 """A gate that must be applied to exactly two qubits."""
55 def num_qubits(self) -> int:
56 return 2
57
58
59 class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
60 """A gate that must be applied to exactly three qubits."""
61 def num_qubits(self) -> int:
62 return 3
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py
--- a/cirq/ops/gate_features.py
+++ b/cirq/ops/gate_features.py
@@ -18,8 +18,10 @@
"""
import abc
+import collections
+from typing import Union, Iterable, Any, List
-from cirq.ops import op_tree, raw_types
+from cirq.ops import raw_types
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
@@ -35,7 +37,8 @@
def num_qubits(self) -> int:
return 1
- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]
+ ) -> List[raw_types.Operation]:
"""Returns a list of operations apply this gate to each of the targets.
Args:
@@ -45,9 +48,20 @@
Operations applying this gate to the target qubits.
Raises:
- ValueError if targets are not instances of Qid.
+ ValueError if targets are not instances of Qid or List[Qid].
"""
- return [self.on(target) for target in targets]
+ operations = [] # type: List[raw_types.Operation]
+ for target in targets:
+ if isinstance(target,
+ collections.Iterable) and not isinstance(target, str):
+ operations.extend(self.on_each(*target))
+ elif isinstance(target, raw_types.Qid):
+ operations.append(self.on(target))
+ else:
+ raise ValueError(
+ 'Gate was called with type different than Qid. Type: {}'.
+ format(type(target)))
+ return operations
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
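Assuming a Cirq version that includes a change along these lines, both call styles from the issue yield the same operations; a quick check:

```python
import cirq

q0, q1 = cirq.LineQubit.range(2)
# Unpacked qubits and a list of qubits are now interchangeable.
assert cirq.H.on_each(q0, q1) == cirq.H.on_each([q0, q1])
```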
| {"golden_diff": "diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py\n--- a/cirq/ops/gate_features.py\n+++ b/cirq/ops/gate_features.py\n@@ -18,8 +18,10 @@\n \"\"\"\n \n import abc\n+import collections\n+from typing import Union, Iterable, Any, List\n \n-from cirq.ops import op_tree, raw_types\n+from cirq.ops import raw_types\n \n \n class InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n@@ -35,7 +37,8 @@\n def num_qubits(self) -> int:\n return 1\n \n- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n+ ) -> List[raw_types.Operation]:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n \n Args:\n@@ -45,9 +48,20 @@\n Operations applying this gate to the target qubits.\n \n Raises:\n- ValueError if targets are not instances of Qid.\n+ ValueError if targets are not instances of Qid or List[Qid].\n \"\"\"\n- return [self.on(target) for target in targets]\n+ operations = [] # type: List[raw_types.Operation]\n+ for target in targets:\n+ if isinstance(target,\n+ collections.Iterable) and not isinstance(target, str):\n+ operations.extend(self.on_each(*target))\n+ elif isinstance(target, raw_types.Qid):\n+ operations.append(self.on(target))\n+ else:\n+ raise ValueError(\n+ 'Gate was called with type different than Qid. Type: {}'.\n+ format(type(target)))\n+ return operations\n \n \n class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n", "issue": "Improve error message if on_each gets a list\nWhen you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**. \r\n\r\nMaybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\n\nfrom cirq.ops import op_tree, raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n def num_qubits(self) -> int:\n return 1\n\n def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n Args:\n *targets: The qubits to apply this gate to.\n\n Returns:\n Operations applying this gate to the target qubits.\n\n Raises:\n ValueError if targets are not instances of Qid.\n 
\"\"\"\n return [self.on(target) for target in targets]\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n def num_qubits(self) -> int:\n return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n def num_qubits(self) -> int:\n return 3\n", "path": "cirq/ops/gate_features.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\nimport collections\nfrom typing import Union, Iterable, Any, List\n\nfrom cirq.ops import raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n def num_qubits(self) -> int:\n return 1\n\n def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n ) -> List[raw_types.Operation]:\n \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n Args:\n *targets: The qubits to apply this gate to.\n\n Returns:\n Operations applying this gate to the target qubits.\n\n Raises:\n ValueError if targets are not instances of Qid or List[Qid].\n \"\"\"\n operations = [] # type: List[raw_types.Operation]\n for target in targets:\n if isinstance(target,\n collections.Iterable) and not isinstance(target, str):\n operations.extend(self.on_each(*target))\n elif isinstance(target, raw_types.Qid):\n operations.append(self.on(target))\n else:\n raise ValueError(\n 'Gate was called with type different than Qid. Type: {}'.\n format(type(target)))\n return operations\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n def num_qubits(self) -> int:\n return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n def num_qubits(self) -> int:\n return 3\n", "path": "cirq/ops/gate_features.py"}]} | 984 | 411 |
gh_patches_debug_39718 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2291 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where no backups exist
### Steps to Reproduce
The mentioned checks are triggered even if no backups are present or configured.
### Expected behavior
When the check can't find a resource ID (it actually says "No Backups"), the check shouldn't trigger
### Actual Result with Screenshots or Logs

### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 under Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
--- END ISSUE ---
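One way to realise the behaviour the issue asks for (emit no finding at all when nothing is found) is sketched below; `SimpleNamespace` objects stand in for the real Prowler client and report classes:

```python
from types import SimpleNamespace

def backup_plan_findings(backup_client):
    # Only build a finding when there is at least one backup plan,
    # so regions with no AWS Backup usage produce no spurious FAILs.
    findings = []
    if backup_client.backup_plans:
        plan = backup_client.backup_plans[0]
        findings.append(SimpleNamespace(
            status="PASS",
            status_extended=f"At least one backup plan exists: {plan.name}",
        ))
    return findings

empty_region = SimpleNamespace(backup_plans=[])
print(backup_plan_findings(empty_region))  # [] instead of a FAIL saying "No Backups"
```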
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_plans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
17 report.resource_arn = backup_client.backup_plans[0].arn
18 report.resource_id = backup_client.backup_plans[0].name
19 report.region = backup_client.backup_plans[0].region
20
21 findings.append(report)
22 return findings
23
```
Path: `prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_reportplans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Report Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_report_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
17 report.resource_arn = backup_client.backup_report_plans[0].arn
18 report.resource_id = backup_client.backup_report_plans[0].name
19 report.region = backup_client.backup_report_plans[0].region
20
21 findings.append(report)
22 return findings
23
```
Path: `prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_vaults_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Vault Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_vaults:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup vault exists: { backup_client.backup_vaults[0].name}"
17 report.resource_arn = backup_client.backup_vaults[0].arn
18 report.resource_id = backup_client.backup_vaults[0].name
19 report.region = backup_client.backup_vaults[0].region
20
21 findings.append(report)
22 return findings
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
@@ -9,11 +9,13 @@
report.status = "FAIL"
report.status_extended = "No Backup Plan Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_plans:
report.status = "PASS"
- report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
+ report.status_extended = (
+ f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
+ )
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
diff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
@@ -5,18 +5,20 @@
class backup_reportplans_exist(Check):
def execute(self):
findings = []
- report = Check_Report_AWS(self.metadata())
- report.status = "FAIL"
- report.status_extended = "No Backup Report Plan Exist"
- report.resource_arn = ""
- report.resource_id = "No Backups"
- report.region = backup_client.region
- if backup_client.backup_report_plans:
- report.status = "PASS"
- report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
- report.resource_arn = backup_client.backup_report_plans[0].arn
- report.resource_id = backup_client.backup_report_plans[0].name
- report.region = backup_client.backup_report_plans[0].region
+ # We only check report plans if backup plans exist, reducing noise
+ if backup_client.backup_plans:
+ report = Check_Report_AWS(self.metadata())
+ report.status = "FAIL"
+ report.status_extended = "No Backup Report Plan Exist"
+ report.resource_arn = ""
+ report.resource_id = "Backups"
+ report.region = backup_client.region
+ if backup_client.backup_report_plans:
+ report.status = "PASS"
+ report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
+ report.resource_arn = backup_client.backup_report_plans[0].arn
+ report.resource_id = backup_client.backup_report_plans[0].name
+ report.region = backup_client.backup_report_plans[0].region
- findings.append(report)
+ findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
@@ -9,7 +9,7 @@
report.status = "FAIL"
report.status_extended = "No Backup Vault Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_vaults:
report.status = "PASS"
| {"golden_diff": "diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n@@ -9,11 +9,13 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n- report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n+ report.status_extended = (\n+ f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n+ )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\ndiff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n@@ -5,18 +5,20 @@\n class backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n- report = Check_Report_AWS(self.metadata())\n- report.status = \"FAIL\"\n- report.status_extended = \"No Backup Report Plan Exist\"\n- report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n- report.region = backup_client.region\n- if backup_client.backup_report_plans:\n- report.status = \"PASS\"\n- report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n- report.resource_arn = backup_client.backup_report_plans[0].arn\n- report.resource_id = backup_client.backup_report_plans[0].name\n- report.region = backup_client.backup_report_plans[0].region\n+ # We only check report plans if backup plans exist, reducing noise\n+ if backup_client.backup_plans:\n+ report = Check_Report_AWS(self.metadata())\n+ report.status = \"FAIL\"\n+ report.status_extended = \"No Backup Report Plan Exist\"\n+ report.resource_arn = \"\"\n+ report.resource_id = \"Backups\"\n+ report.region = backup_client.region\n+ if backup_client.backup_report_plans:\n+ report.status = \"PASS\"\n+ report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n+ report.resource_arn = backup_client.backup_report_plans[0].arn\n+ report.resource_id = backup_client.backup_report_plans[0].name\n+ report.region = backup_client.backup_report_plans[0].region\n \n- findings.append(report)\n+ findings.append(report)\n return findings\ndiff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n@@ -9,7 +9,7 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = 
backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n", "issue": "[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist\n### Steps to Reproduce\n\nThe mentioned checks are triggered even if no backups are present or configured.\n\n### Expected behavior\n\nWhen the check can't find a resource ID (it actually says \"No Backups\"), the check shouldn't trigger\n\n### Actual Result with Screenshots or Logs\n\n\r\n\n\n### How did you install Prowler?\n\nFrom pip package (pip install prowler)\n\n### Environment Resource\n\nWorkstation\n\n### OS used\n\nWSL2 under Windows 11\n\n### Prowler version\n\nProwler 3.4.0 (it is the latest version, yay!)\n\n### Pip version\n\npip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region 
= backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}], "after_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = (\n f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n # We only check report plans if backup plans exist, reducing noise\n if backup_client.backup_plans:\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region = backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}]} | 1,274 | 915 |
gh_patches_debug_19093 | rasdani/github-patches | git_diff | weecology__retriever-287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
download command should probably fail when specified path does not exist
A dataset can be downloaded to a specific path with the function `download` while specifying the -p argument. For example `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path, but if my_path does not exist a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist.
--- END ISSUE ---
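A small sketch of the validation the issue asks for, checking the target before any files are copied (`require_directory` is a hypothetical helper; creating the missing directory instead of failing would be an equally reasonable resolution):

```python
import os
import sys

def require_directory(path):
    # Fail early with a clear message instead of letting copies clobber
    # a regular file that happens to share the target path's name.
    if not os.path.isdir(path):
        sys.exit("Download path '%s' does not exist or is not a directory" % path)
    return path

print(require_directory("."))  # "." exists, so this simply returns the path
```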
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `engines/download_only.py`
Content:
```
1 import os
2 import platform
3 import shutil
4 import inspect
5 from retriever.lib.engine import filename_from_url
6 from retriever.lib.models import Engine, no_cleanup
7 from retriever import DATA_DIR, HOME_DIR
8
9 class DummyConnection:
10 def cursor(self):
11 pass
12 def commit(self):
13 pass
14 def rollback(self):
15 pass
16 def close(self):
17 pass
18
19 class DummyCursor(DummyConnection):
20 pass
21
22
23 class engine(Engine):
24 """Engine instance for writing data to a CSV file."""
25 name = "Download Only"
26 abbreviation = "download"
27 required_opts = [("path",
28 "File path to copy data files",
29 "./"),
30 ]
31
32 def table_exists(self, dbname, tablename):
33 try:
34 tablename = self.table_name(name=tablename, dbname=dbname)
35 return os.path.exists(tablename)
36 except:
37 return False
38
39 def get_connection(self):
40 """Gets the db connection."""
41 self.get_input()
42 return DummyConnection()
43
44 def final_cleanup(self):
45 data_dir = self.format_data_dir()
46 if hasattr(self, "all_files"):
47 for file_name in self.all_files:
48 file_path, file_name_nopath = os.path.split(file_name)
49 if file_path == DATA_DIR:
50 print ("%s is already in the working directory" % file_name_nopath)
51 print("Keeping existing copy.")
52 else:
53 print("Copying %s from %s" % (file_name_nopath, file_path))
54 shutil.copy(file_name, self.opts['path'])
55 self.all_files = set()
56
57 def auto_create_table(self, table, url=None, filename=None, pk=None):
58 if url and not filename:
59 filename = filename_from_url(url)
60
61 if url and not self.find_file(filename):
62 # If the file doesn't exist, download it
63 self.download_file(url, filename)
64
65 def insert_data_from_url(self, url):
66 filename = filename_from_url(url)
67 find = self.find_file(filename)
68 if not find:
69 self.create_raw_data_dir()
70 self.download_file(url, filename)
71
72 def find_file(self, filename):
73 result = Engine.find_file(self, filename)
74 if not hasattr(self, "all_files"): self.all_files = set()
75 if result: self.all_files.add(result)
76 return result
77
78 def register_files(self, filenames):
79 """Identify a list of files to be moved by the download
80
81 When downloading archives with multiple files the engine needs to be
82 informed of all of the file names so that it can move them.
83
84 """
85 full_filenames = {self.find_file(filename) for filename in filenames}
86 self.all_files = self.all_files.union(full_filenames)
87
88
89 # replace all other methods with a function that does nothing
90 def dummy_method(self, *args, **kwargs):
91 pass
92 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
93 keep_methods = {'table_exists',
94 'get_connection',
95 'final_cleanup',
96 'auto_create_table',
97 'insert_data_from_url',
98 }
99 remove_methods = ['insert_data_from_file']
100 for name, method in methods:
101 if (not name in keep_methods
102 and not 'download' in name
103 and not 'file' in name
104 and not 'dir' in name):
105
106 setattr(engine, name, dummy_method)
107 for name in remove_methods:
108 setattr(engine, name, dummy_method)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -51,7 +51,18 @@
print("Keeping existing copy.")
else:
print("Copying %s from %s" % (file_name_nopath, file_path))
- shutil.copy(file_name, self.opts['path'])
+ if os.path.isdir(self.opts['path']):
+ try:
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't copy file to %s" % self.opts['path'])
+ else:
+ try:
+ print("Creating directory %s" % self.opts['path'])
+ os.mkdir(self.opts['path'])
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't create directory %s" % self.opts['path'])
self.all_files = set()
def auto_create_table(self, table, url=None, filename=None, pk=None):
| {"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -51,7 +51,18 @@\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n- shutil.copy(file_name, self.opts['path'])\n+ if os.path.isdir(self.opts['path']):\n+ try:\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't copy file to %s\" % self.opts['path'])\n+ else:\n+ try:\n+ print(\"Creating directory %s\" % self.opts['path'])\n+ os.mkdir(self.opts['path'])\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n \n def auto_create_table(self, table, url=None, filename=None, pk=None):\n", "issue": "download command should probably fail when specified path does not exist\nA datsaet can be downloaded to a specific path with the function `download` while specifying the -p argument. For example `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path but if my_path does not exist a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist. \n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n shutil.copy(file_name, self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives 
with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}], "after_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(self.opts['path']):\n try:\n shutil.copy(file_name, self.opts['path'])\n except:\n print(\"Couldn't copy file to %s\" % self.opts['path'])\n else:\n try:\n print(\"Creating directory %s\" % self.opts['path'])\n os.mkdir(self.opts['path'])\n shutil.copy(file_name, self.opts['path'])\n except:\n print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = 
{self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]} | 1,322 | 231 |
gh_patches_debug_12065 | rasdani/github-patches | git_diff | tinygrad__tinygrad-65 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EOFError: Ran out of input
When running the example and solving the "Can't import fetch from utils" issue, this one comes up:

--- END ISSUE ---
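An interrupted download can leave an empty cache file behind, which is one plausible way to hit this error; a defensive-read sketch, assuming `fp` is the cache path used by `fetch`:

```python
import os

def read_cached(fp):
    # Treat an empty cache file like a missing one so the caller
    # re-downloads instead of trying to decompress zero bytes.
    if os.path.isfile(fp) and os.stat(fp).st_size > 0:
        with open(fp, "rb") as f:
            return f.read()
    return None

print(read_cached("/nonexistent/cache/entry"))  # None, so the caller should re-fetch
```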
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tinygrad/utils.py`
Content:
```
1 import numpy as np
2
3 def mask_like(like, mask_inx, mask_value = 1.0):
4 mask = np.zeros_like(like).reshape(-1)
5 mask[mask_inx] = mask_value
6 return mask.reshape(like.shape)
7
8 def layer_init_uniform(*x):
9 ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
10 return ret.astype(np.float32)
11
12 def fetch(url):
13 import requests, os, hashlib, tempfile
14 fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
15 if os.path.isfile(fp):
16 with open(fp, "rb") as f:
17 dat = f.read()
18 else:
19 print("fetching %s" % url)
20 with open(fp+".tmp", "wb") as f:
21 dat = requests.get(url).content
22 f.write(dat)
23 os.rename(fp+".tmp", fp)
24 return dat
25
26 def fetch_mnist():
27 import gzip
28 parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
29 X_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
30 Y_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"))[8:]
31 X_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
32 Y_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"))[8:]
33 return X_train, Y_train, X_test, Y_test
34
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tinygrad/utils.py b/tinygrad/utils.py
--- a/tinygrad/utils.py
+++ b/tinygrad/utils.py
@@ -1,4 +1,5 @@
import numpy as np
+import os
def mask_like(like, mask_inx, mask_value = 1.0):
mask = np.zeros_like(like).reshape(-1)
@@ -12,7 +13,7 @@
def fetch(url):
import requests, os, hashlib, tempfile
fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
- if os.path.isfile(fp):
+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:
with open(fp, "rb") as f:
dat = f.read()
else:
| {"golden_diff": "diff --git a/tinygrad/utils.py b/tinygrad/utils.py\n--- a/tinygrad/utils.py\n+++ b/tinygrad/utils.py\n@@ -1,4 +1,5 @@\n import numpy as np\n+import os\n \n def mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n@@ -12,7 +13,7 @@\n def fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n- if os.path.isfile(fp):\n+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n", "issue": "EOFError: Ran out of input\nWhen running example and solving \"Can't import fetch from utils\" issue, this one comes up:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp):\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}], "after_files": [{"content": "import numpy as np\nimport os\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}]} | 830 | 176 |
gh_patches_debug_20502 | rasdani/github-patches | git_diff | cloudtools__troposphere-1205 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User
This property was released by AWS on November 9.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html
```
PermissionsBoundary
The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.
Required: No
Type: String
Update requires: No interruption
```
--- END ISSUE ---
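A hypothetical usage sketch, assuming a troposphere release in which the property has been added to `Role` (the account ID and policy ARN below are placeholders):

```python
from troposphere import Template
from troposphere.iam import Role

template = Template()
template.add_resource(Role(
    "AppRole",
    AssumeRolePolicyDocument={"Version": "2012-10-17", "Statement": []},
    PermissionsBoundary="arn:aws:iam::123456789012:policy/example-boundary",  # new optional ARN string
))
print(template.to_json())
```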
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/iam.py`
Content:
```
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49
50 PolicyProperty = Policy
51
52
53 class Group(AWSObject):
54 resource_type = "AWS::IAM::Group"
55
56 props = {
57 'GroupName': (iam_group_name, False),
58 'ManagedPolicyArns': ([basestring], False),
59 'Path': (iam_path, False),
60 'Policies': ([Policy], False),
61 }
62
63
64 class InstanceProfile(AWSObject):
65 resource_type = "AWS::IAM::InstanceProfile"
66
67 props = {
68 'Path': (iam_path, False),
69 'Roles': (list, True),
70 'InstanceProfileName': (basestring, False),
71 }
72
73
74 class Role(AWSObject):
75 resource_type = "AWS::IAM::Role"
76
77 props = {
78 'AssumeRolePolicyDocument': (policytypes, True),
79 'ManagedPolicyArns': ([basestring], False),
80 'MaxSessionDuration': (integer, False),
81 'Path': (iam_path, False),
82 'Policies': ([Policy], False),
83 'RoleName': (iam_role_name, False),
84 }
85
86
87 class ServiceLinkedRole(AWSObject):
88 resource_type = "AWS::IAM::ServiceLinkedRole"
89
90 props = {
91 'AWSServiceName': (basestring, True),
92 'CustomSuffix': (basestring, False),
93 'Description': (basestring, False),
94 }
95
96
97 class LoginProfile(AWSProperty):
98 props = {
99 'Password': (basestring, True),
100 'PasswordResetRequired': (boolean, False),
101 }
102
103
104 class User(AWSObject):
105 resource_type = "AWS::IAM::User"
106
107 props = {
108 'Path': (iam_path, False),
109 'Groups': ([basestring], False),
110 'ManagedPolicyArns': ([basestring], False),
111 'LoginProfile': (LoginProfile, False),
112 'Policies': ([Policy], False),
113 'UserName': (iam_user_name, False),
114 }
115
116
117 class UserToGroupAddition(AWSObject):
118 resource_type = "AWS::IAM::UserToGroupAddition"
119
120 props = {
121 'GroupName': (basestring, True),
122 'Users': (list, True),
123 }
124
125
126 class ManagedPolicy(AWSObject):
127 resource_type = "AWS::IAM::ManagedPolicy"
128
129 props = {
130 'Description': (basestring, False),
131 'Groups': ([basestring], False),
132 'ManagedPolicyName': (basestring, False),
133 'Path': (iam_path, False),
134 'PolicyDocument': (policytypes, True),
135 'Roles': ([basestring], False),
136 'Users': ([basestring], False),
137 }
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -79,6 +79,7 @@
'ManagedPolicyArns': ([basestring], False),
'MaxSessionDuration': (integer, False),
'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'RoleName': (iam_role_name, False),
}
@@ -105,10 +106,11 @@
resource_type = "AWS::IAM::User"
props = {
- 'Path': (iam_path, False),
'Groups': ([basestring], False),
- 'ManagedPolicyArns': ([basestring], False),
'LoginProfile': (LoginProfile, False),
+ 'ManagedPolicyArns': ([basestring], False),
+ 'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'UserName': (iam_user_name, False),
}
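For illustration only (not part of the issue or the patch): once `PermissionsBoundary` exists on `Role`/`User`, a template built against a troposphere version containing the diff above could set it as sketched below. The logical name, trust policy, and boundary ARN are placeholder assumptions.

```python
from troposphere import Template
from troposphere.iam import Role

template = Template()
template.add_resource(Role(
    "AppRole",  # logical resource name; placeholder
    AssumeRolePolicyDocument={
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"Service": ["ec2.amazonaws.com"]},
            "Action": ["sts:AssumeRole"],
        }],
    },
    # New property added by the diff above; the ARN is a made-up example.
    PermissionsBoundary="arn:aws:iam::123456789012:policy/DeveloperBoundary",
))
print(template.to_json())
```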
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -79,6 +79,7 @@\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n@@ -105,10 +106,11 @@\n resource_type = \"AWS::IAM::User\"\n \n props = {\n- 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n- 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n+ 'ManagedPolicyArns': ([basestring], False),\n+ 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n", "issue": "Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User\nThis property has been released on November 9 by AWS.\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html\r\n```\r\nPermissionsBoundary\r\n\r\n The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.\r\n\r\n Required: No\r\n\r\n Type: String\r\n\r\n Update requires: No interruption\r\n\r\n```\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n 
resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Groups': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n 
props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} | 1,541 | 256 |
gh_patches_debug_26528 | rasdani/github-patches | git_diff | ESMCI__cime-1048 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing f19_g16_r01 high resolution river flow grid
Between cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...
```
[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml
Index: ../cime_config/cesm/config_grids.xml
===================================================================
--- ../cime_config/cesm/config_grids.xml (revision 7095)
+++ ../cime_config/cesm/config_grids.xml (working copy)
@@ -294,6 +294,15 @@
<grid name="ocnice">gx1v6</grid>
</model_grid>
+ <model_grid alias="f19_g16_r01">
+ <grid name="atm">1.9x2.5</grid>
+ <grid name="lnd">1.9x2.5</grid>
+ <grid name="ocnice">gx1v6</grid>
+ <grid name="rof">r01</grid>
+ <mask>gx1v6</mask>
+ </model_grid>
+
+
<model_grid alias="f19_g16_gl4" compset="_CISM">
<grid name="atm">1.9x2.5</grid>
<grid name="lnd">1.9x2.5</grid>
```
@mvertens @jedwards4b @billsacks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/BuildTools/configure.py`
Content:
```
1 #!/usr/bin/env python
2
3 """This script writes CIME build information to a directory.
4
5 The pieces of information that will be written include:
6
7 1. Machine-specific build settings (i.e. the "Macros" file).
8 2. File-specific build settings (i.e. "Depends" files).
9 3. Environment variable loads (i.e. the env_mach_specific files).
10
11 The .env_mach_specific.sh and .env_mach_specific.csh files are specific to a
12 given compiler, MPI library, and DEBUG setting. By default, these will be the
13 machine's default compiler, the machine's default MPI library, and FALSE,
14 respectively. These can be changed by setting the environment variables
15 COMPILER, MPILIB, and DEBUG, respectively.
16 """
17
18 import shutil
19 from CIME.XML.standard_module_setup import *
20 from CIME.utils import expect
21 from CIME.XML.compilers import Compilers
22 from CIME.XML.env_mach_specific import EnvMachSpecific
23
24 logger = logging.getLogger(__name__)
25
26 def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):
27 """Add Macros, Depends, and env_mach_specific files to a directory.
28
29 Arguments:
30 machobj - Machines argument for this machine.
31 output_dir - Directory in which to place output.
32 macros_format - Container containing the string 'Makefile' to produce
33 Makefile Macros output, and/or 'CMake' for CMake output.
34 compiler - String containing the compiler vendor to configure for.
35 mpilib - String containing the MPI implementation to configure for.
36 debug - Boolean specifying whether debugging options are enabled.
37 """
38 # Macros generation.
39 suffixes = {'Makefile': 'make', 'CMake': 'cmake'}
40 macro_maker = Compilers(machobj)
41 for form in macros_format:
42 out_file_name = os.path.join(output_dir,"Macros."+suffixes[form])
43 macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])
44
45 _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)
46 _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,
47 debug, sysos)
48
49 def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
50 """
51 Copy any system or compiler Depends files if they do not exist in the output directory
52 """
53 for dep in (machine_name, compiler):
54 dfile = os.path.join(machines_dir, "Depends.%s"%dep)
55 outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
56 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
57 shutil.copyfile(dfile, outputdfile)
58 dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
59 outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
60 if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
61 shutil.copyfile(dfile, outputdfile)
62
63 def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
64 """
65 env_mach_specific generation.
66 """
67 ems_path = os.path.join(output_dir, "env_mach_specific.xml")
68 if os.path.exists(ems_path):
69 logger.warn("%s already exists, delete to replace"%ems_path)
70 return
71 ems_file = EnvMachSpecific(output_dir)
72 ems_file.populate(machobj)
73 ems_file.write()
74 for shell in ('sh', 'csh'):
75 ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)
76 shell_path = os.path.join(output_dir, ".env_mach_specific." + shell)
77 with open(shell_path, 'a') as shell_file:
78 if shell == 'sh':
79 shell_file.write("\nexport COMPILER=%s\n" % compiler)
80 shell_file.write("export MPILIB=%s\n" % mpilib)
81 shell_file.write("export DEBUG=%s\n" % repr(debug).upper())
82 shell_file.write("export OS=%s\n" % sysos)
83 else:
84 shell_file.write("\nsetenv COMPILER %s\n" % compiler)
85 shell_file.write("setenv MPILIB %s\n" % mpilib)
86 shell_file.write("setenv DEBUG %s\n" % repr(debug).upper())
87 shell_file.write("setenv OS %s\n" % sysos)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py
--- a/utils/python/CIME/BuildTools/configure.py
+++ b/utils/python/CIME/BuildTools/configure.py
@@ -49,16 +49,20 @@
def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):
"""
Copy any system or compiler Depends files if they do not exist in the output directory
+ If there is a match for Depends.machine_name.compiler copy that and ignore the others
"""
- for dep in (machine_name, compiler):
- dfile = os.path.join(machines_dir, "Depends.%s"%dep)
- outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
dfile = os.path.join(machines_dir, "Depends.%s.%s"%(machine_name,compiler))
outputdfile = os.path.join(output_dir, "Depends.%s.%s"%(machine_name,compiler))
- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
- shutil.copyfile(dfile, outputdfile)
+ if os.path.isfile(dfile):
+ if not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+ else:
+ for dep in (machine_name, compiler):
+ dfile = os.path.join(machines_dir, "Depends.%s"%dep)
+ outputdfile = os.path.join(output_dir, "Depends.%s"%dep)
+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):
+ shutil.copyfile(dfile, outputdfile)
+
def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):
"""
| {"golden_diff": "diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py\n--- a/utils/python/CIME/BuildTools/configure.py\n+++ b/utils/python/CIME/BuildTools/configure.py\n@@ -49,16 +49,20 @@\n def _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n+ If there is a match for Depends.machine_name.compiler copy that and ignore the others\n \"\"\"\n- for dep in (machine_name, compiler):\n- dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n- outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n- if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n- shutil.copyfile(dfile, outputdfile)\n+ if os.path.isfile(dfile):\n+ if not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+ else:\n+ for dep in (machine_name, compiler):\n+ dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n+ outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n+ if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n+ shutil.copyfile(dfile, outputdfile)\n+\n \n def _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n", "issue": "Missing f19_g16_r01 high resolution river flow grid\nBetween cime5.2.0-alpha.9 and cime5.2.0-alpha.20 the config_grids file format was changed, and one grid needed for CLM testing was removed. The change to add it back again is as follows...\r\n\r\n```\r\n[erik@yslogin4 scripts]$ svn diff ../cime_config/cesm/config_grids.xml \r\nIndex: ../cime_config/cesm/config_grids.xml\r\n===================================================================\r\n--- ../cime_config/cesm/config_grids.xml\t(revision 7095)\r\n+++ ../cime_config/cesm/config_grids.xml\t(working copy)\r\n@@ -294,6 +294,15 @@\r\n <grid name=\"ocnice\">gx1v6</grid>\r\n </model_grid>\r\n \r\n+ <model_grid alias=\"f19_g16_r01\">\r\n+ <grid name=\"atm\">1.9x2.5</grid>\r\n+ <grid name=\"lnd\">1.9x2.5</grid>\r\n+ <grid name=\"ocnice\">gx1v6</grid>\r\n+ <grid name=\"rof\">r01</grid>\r\n+ <mask>gx1v6</mask>\r\n+ </model_grid>\r\n+\r\n+\r\n <model_grid alias=\"f19_g16_gl4\" compset=\"_CISM\">\r\n <grid name=\"atm\">1.9x2.5</grid>\r\n <grid name=\"lnd\">1.9x2.5</grid>\r\n\r\n```\r\n@mvertens @jedwards4b @billsacks\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"This script writes CIME build information to a directory.\n\nThe pieces of information that will be written include:\n\n1. Machine-specific build settings (i.e. the \"Macros\" file).\n2. File-specific build settings (i.e. \"Depends\" files).\n3. Environment variable loads (i.e. the env_mach_specific files).\n\nThe .env_mach_specific.sh and .env_mach_specific.csh files are specific to a\ngiven compiler, MPI library, and DEBUG setting. By default, these will be the\nmachine's default compiler, the machine's default MPI library, and FALSE,\nrespectively. 
These can be changed by setting the environment variables\nCOMPILER, MPILIB, and DEBUG, respectively.\n\"\"\"\n\nimport shutil\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect\nfrom CIME.XML.compilers import Compilers\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nlogger = logging.getLogger(__name__)\n\ndef configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):\n \"\"\"Add Macros, Depends, and env_mach_specific files to a directory.\n\n Arguments:\n machobj - Machines argument for this machine.\n output_dir - Directory in which to place output.\n macros_format - Container containing the string 'Makefile' to produce\n Makefile Macros output, and/or 'CMake' for CMake output.\n compiler - String containing the compiler vendor to configure for.\n mpilib - String containing the MPI implementation to configure for.\n debug - Boolean specifying whether debugging options are enabled.\n \"\"\"\n # Macros generation.\n suffixes = {'Makefile': 'make', 'CMake': 'cmake'}\n macro_maker = Compilers(machobj)\n for form in macros_format:\n out_file_name = os.path.join(output_dir,\"Macros.\"+suffixes[form])\n macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])\n\n _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)\n _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,\n debug, sysos)\n\ndef _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n \"\"\"\n for dep in (machine_name, compiler):\n dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n\ndef _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n env_mach_specific generation.\n \"\"\"\n ems_path = os.path.join(output_dir, \"env_mach_specific.xml\")\n if os.path.exists(ems_path):\n logger.warn(\"%s already exists, delete to replace\"%ems_path)\n return\n ems_file = EnvMachSpecific(output_dir)\n ems_file.populate(machobj)\n ems_file.write()\n for shell in ('sh', 'csh'):\n ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)\n shell_path = os.path.join(output_dir, \".env_mach_specific.\" + shell)\n with open(shell_path, 'a') as shell_file:\n if shell == 'sh':\n shell_file.write(\"\\nexport COMPILER=%s\\n\" % compiler)\n shell_file.write(\"export MPILIB=%s\\n\" % mpilib)\n shell_file.write(\"export DEBUG=%s\\n\" % repr(debug).upper())\n shell_file.write(\"export OS=%s\\n\" % sysos)\n else:\n shell_file.write(\"\\nsetenv COMPILER %s\\n\" % compiler)\n shell_file.write(\"setenv MPILIB %s\\n\" % mpilib)\n shell_file.write(\"setenv DEBUG %s\\n\" % repr(debug).upper())\n shell_file.write(\"setenv OS %s\\n\" % sysos)\n", "path": "utils/python/CIME/BuildTools/configure.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"This script writes CIME build information to a directory.\n\nThe pieces of information that will be written include:\n\n1. Machine-specific build settings (i.e. the \"Macros\" file).\n2. 
File-specific build settings (i.e. \"Depends\" files).\n3. Environment variable loads (i.e. the env_mach_specific files).\n\nThe .env_mach_specific.sh and .env_mach_specific.csh files are specific to a\ngiven compiler, MPI library, and DEBUG setting. By default, these will be the\nmachine's default compiler, the machine's default MPI library, and FALSE,\nrespectively. These can be changed by setting the environment variables\nCOMPILER, MPILIB, and DEBUG, respectively.\n\"\"\"\n\nimport shutil\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect\nfrom CIME.XML.compilers import Compilers\nfrom CIME.XML.env_mach_specific import EnvMachSpecific\n\nlogger = logging.getLogger(__name__)\n\ndef configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos):\n \"\"\"Add Macros, Depends, and env_mach_specific files to a directory.\n\n Arguments:\n machobj - Machines argument for this machine.\n output_dir - Directory in which to place output.\n macros_format - Container containing the string 'Makefile' to produce\n Makefile Macros output, and/or 'CMake' for CMake output.\n compiler - String containing the compiler vendor to configure for.\n mpilib - String containing the MPI implementation to configure for.\n debug - Boolean specifying whether debugging options are enabled.\n \"\"\"\n # Macros generation.\n suffixes = {'Makefile': 'make', 'CMake': 'cmake'}\n macro_maker = Compilers(machobj)\n for form in macros_format:\n out_file_name = os.path.join(output_dir,\"Macros.\"+suffixes[form])\n macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])\n\n _copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler)\n _generate_env_mach_specific(output_dir, machobj, compiler, mpilib,\n debug, sysos)\n\ndef _copy_depends_files(machine_name, machines_dir, output_dir, compiler):\n \"\"\"\n Copy any system or compiler Depends files if they do not exist in the output directory\n If there is a match for Depends.machine_name.compiler copy that and ignore the others\n \"\"\"\n dfile = os.path.join(machines_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n outputdfile = os.path.join(output_dir, \"Depends.%s.%s\"%(machine_name,compiler))\n if os.path.isfile(dfile):\n if not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n else:\n for dep in (machine_name, compiler):\n dfile = os.path.join(machines_dir, \"Depends.%s\"%dep)\n outputdfile = os.path.join(output_dir, \"Depends.%s\"%dep)\n if os.path.isfile(dfile) and not os.path.isfile(outputdfile):\n shutil.copyfile(dfile, outputdfile)\n\n\ndef _generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, sysos):\n \"\"\"\n env_mach_specific generation.\n \"\"\"\n ems_path = os.path.join(output_dir, \"env_mach_specific.xml\")\n if os.path.exists(ems_path):\n logger.warn(\"%s already exists, delete to replace\"%ems_path)\n return\n ems_file = EnvMachSpecific(output_dir)\n ems_file.populate(machobj)\n ems_file.write()\n for shell in ('sh', 'csh'):\n ems_file.make_env_mach_specific_file(compiler, debug, mpilib, shell)\n shell_path = os.path.join(output_dir, \".env_mach_specific.\" + shell)\n with open(shell_path, 'a') as shell_file:\n if shell == 'sh':\n shell_file.write(\"\\nexport COMPILER=%s\\n\" % compiler)\n shell_file.write(\"export MPILIB=%s\\n\" % mpilib)\n shell_file.write(\"export DEBUG=%s\\n\" % repr(debug).upper())\n shell_file.write(\"export OS=%s\\n\" % sysos)\n else:\n shell_file.write(\"\\nsetenv COMPILER %s\\n\" % compiler)\n 
shell_file.write(\"setenv MPILIB %s\\n\" % mpilib)\n shell_file.write(\"setenv DEBUG %s\\n\" % repr(debug).upper())\n shell_file.write(\"setenv OS %s\\n\" % sysos)\n", "path": "utils/python/CIME/BuildTools/configure.py"}]} | 1,787 | 427 |
gh_patches_debug_19916 | rasdani/github-patches | git_diff | weecology__retriever-1121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a description field(s) to setup.py
This populates the description on PYPI:
https://packaging.python.org/tutorials/distributing-packages/#description
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import platform
6
7 from pkg_resources import parse_version
8 from setuptools import setup, find_packages
9
10 current_platform = platform.system().lower()
11 extra_includes = []
12 if current_platform == "windows":
13 extra_includes += ["pypyodbc"]
14
15 if os.path.exists(".git/hooks"): # check if we are in git repo
16 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
17 os.system("chmod +x .git/hooks/pre-commit")
18
19 app_data = "~/.retriever/scripts"
20 if os.path.exists(app_data):
21 os.system("rm -r {}".format(app_data))
22
23 __version__ = 'v2.1.dev'
24 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
25 version_file.write("__version__ = " + "'" + __version__ + "'\n")
26 version_file.close()
27
28
29 def clean_version(v):
30 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
31
32 includes = [
33 'xlrd',
34 'future',
35 'argcomplete',
36 'pymysql',
37 'psycopg2',
38 'sqlite3',
39 ] + extra_includes
40
41 excludes = [
42 'pyreadline',
43 'doctest',
44 'pickle',
45 'pdb',
46 'pywin', 'pywin.debugger',
47 'pywin.debugger.dbgcon',
48 'pywin.dialogs', 'pywin.dialogs.list',
49 'Tkconstants', 'Tkinter', 'tcl', 'tk'
50 ]
51
52 setup(name='retriever',
53 version=clean_version(__version__),
54 description='Data Retriever',
55 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
56 author_email='[email protected]',
57 url='https://github.com/weecology/retriever',
58 classifiers=['Intended Audience :: Science/Research',
59 'License :: OSI Approved :: MIT License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 3', ],
63 packages=find_packages(
64 exclude=['hooks',
65 'docs',
66 'tests',
67 'scripts',
68 'docker',
69 ".cache"]),
70 entry_points={
71 'console_scripts': [
72 'retriever = retriever.__main__:main',
73 ],
74 },
75 install_requires=[
76 'xlrd',
77 'future',
78 'argcomplete',
79 'tqdm'
80 ],
81 data_files=[('', ['CITATION'])],
82 setup_requires=[],
83 )
84
85 # windows doesn't have bash. No point in using bash-completion
86 if current_platform != "windows":
87 # if platform is OS X use "~/.bash_profile"
88 if current_platform == "darwin":
89 bash_file = "~/.bash_profile"
90 # if platform is Linux use "~/.bashrc
91 elif current_platform == "linux":
92 bash_file = "~/.bashrc"
93 # else write and discard
94 else:
95 bash_file = "/dev/null"
96
97 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
98 with open(os.path.expanduser(bash_file), "a+") as bashrc:
99 bashrc.seek(0)
100 # register retriever for arg-completion if not already registered
101 # whenever a new shell is spawned
102 if argcomplete_command not in bashrc.read():
103 bashrc.write(argcomplete_command + "\n")
104 bashrc.close()
105 os.system("activate-global-python-argcomplete")
106 # register for the current shell
107 os.system(argcomplete_command)
108
109 try:
110 from retriever.compile import compile
111 from retriever.lib.repository import check_for_updates
112
113 check_for_updates(False)
114 compile()
115 except:
116 pass
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@
def clean_version(v):
return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
+
includes = [
'xlrd',
'future',
@@ -52,6 +53,10 @@
setup(name='retriever',
version=clean_version(__version__),
description='Data Retriever',
+ long_description=('The Data Retriever is a package manager for data. '
+ 'It downloads, cleans, and stores publicly available data, '
+ 'so that analysts spend less time cleaning and managing data, '
+ 'and more time analyzing it.'),
author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
author_email='[email protected]',
url='https://github.com/weecology/retriever',
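As a hedged aside (not the project's actual code): beyond the hard-coded `long_description` in the diff above, a common pattern is to read the README and declare its content type so PyPI renders it. The file name, encoding, and version string below are assumptions, and `long_description_content_type` requires a reasonably recent setuptools.

```python
import io

from setuptools import setup

with io.open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="retriever",
    version="2.1.0",  # placeholder
    description="Data Retriever",
    long_description=long_description,
    long_description_content_type="text/markdown",  # tells PyPI how to render the text
    url="https://github.com/weecology/retriever",
)
```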
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,7 @@\n def clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n \n+\n includes = [\n 'xlrd',\n 'future',\n@@ -52,6 +53,10 @@\n setup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n+ long_description=('The Data Retriever is a package manager for data. '\n+ 'It downloads, cleans, and stores publicly available data, '\n+ 'so that analysts spend less time cleaning and managing data, '\n+ 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n", "issue": "Add a description field(s) to setup.py\nThis populates the description on PYPI:\r\n\r\nhttps://packaging.python.org/tutorials/distributing-packages/#description\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n long_description=('The Data Retriever is a package manager for data. '\n 'It downloads, cleans, and stores publicly available data, '\n 'so that analysts spend less time cleaning and managing data, '\n 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}]} | 1,372 | 217 |
gh_patches_debug_37237 | rasdani/github-patches | git_diff | Parsl__parsl-1075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Condor provider has a hard-coded `cmd_timeout`.
This timeout should be configurable and probably default to something much longer.
https://github.com/Parsl/parsl/blob/1d8c9e35752274af6ae2ce2f07107474ece4561c/parsl/providers/condor/condor.py#L225
cc @ZhuozhaoLi who noted this in a comment to #889
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/providers/condor/condor.py`
Content:
```
1 import logging
2 import os
3 import re
4 import time
5
6 from parsl.channels import LocalChannel
7 from parsl.utils import RepresentationMixin
8 from parsl.launchers import SingleNodeLauncher
9 from parsl.providers.condor.template import template_string
10 from parsl.providers.cluster_provider import ClusterProvider
11
12 logger = logging.getLogger(__name__)
13
14 # See http://pages.cs.wisc.edu/~adesmet/status.html
15 translate_table = {
16 '1': 'PENDING',
17 '2': 'RUNNING',
18 '3': 'CANCELLED',
19 '4': 'COMPLETED',
20 '5': 'FAILED',
21 '6': 'FAILED',
22 }
23
24
25 class CondorProvider(RepresentationMixin, ClusterProvider):
26 """HTCondor Execution Provider.
27
28 Parameters
29 ----------
30 channel : Channel
31 Channel for accessing this provider. Possible channels include
32 :class:`~parsl.channels.LocalChannel` (the default),
33 :class:`~parsl.channels.SSHChannel`, or
34 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
35 nodes_per_block : int
36 Nodes to provision per block.
37 init_blocks : int
38 Number of blocks to provision at time of initialization
39 min_blocks : int
40 Minimum number of blocks to maintain
41 max_blocks : int
42 Maximum number of blocks to maintain.
43 parallelism : float
44 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
45 scaling where as many resources as possible are used; parallelism close to 0 represents
46 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
47 environment : dict of str
48 A dictionary of environmant variable name and value pairs which will be set before
49 running a task.
50 project : str
51 Project which the job will be charged against
52 scheduler_options : str
53 String to add specific condor attributes to the HTCondor submit script.
54 transfer_input_files : list(str)
55 List of strings of paths to additional files or directories to transfer to the job
56 worker_init : str
57 Command to be run before starting a worker.
58 requirements : str
59 Condor requirements.
60 launcher : Launcher
61 Launcher for this provider. Possible launchers include
62 :class:`~parsl.launchers.SingleNodeLauncher` (the default),
63 """
64 def __init__(self,
65 channel=LocalChannel(),
66 nodes_per_block=1,
67 init_blocks=1,
68 min_blocks=0,
69 max_blocks=10,
70 parallelism=1,
71 environment=None,
72 project='',
73 scheduler_options='',
74 transfer_input_files=[],
75 walltime="00:10:00",
76 worker_init='',
77 launcher=SingleNodeLauncher(),
78 requirements=''):
79
80 label = 'condor'
81 super().__init__(label,
82 channel,
83 nodes_per_block,
84 init_blocks,
85 min_blocks,
86 max_blocks,
87 parallelism,
88 walltime,
89 launcher)
90
91 self.provisioned_blocks = 0
92
93 self.environment = environment if environment is not None else {}
94 for key, value in self.environment.items():
95 # To escape literal quote marks, double them
96 # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html
97 try:
98 self.environment[key] = "'{}'".format(value.replace("'", '"').replace('"', '""'))
99 except AttributeError:
100 pass
101
102 self.project = project
103 self.scheduler_options = scheduler_options
104 self.worker_init = worker_init
105 self.requirements = requirements
106 self.transfer_input_files = transfer_input_files
107
108 def _status(self):
109 """Update the resource dictionary with job statuses."""
110
111 job_id_list = ' '.join(self.resources.keys())
112 cmd = "condor_q {0} -af:jr JobStatus".format(job_id_list)
113 retcode, stdout, stderr = super().execute_wait(cmd)
114 """
115 Example output:
116
117 $ condor_q 34524642.0 34524643.0 -af:jr JobStatus
118 34524642.0 2
119 34524643.0 1
120 """
121
122 for line in stdout.strip().split('\n'):
123 parts = line.split()
124 job_id = parts[0]
125 status = translate_table.get(parts[1], 'UNKNOWN')
126 self.resources[job_id]['status'] = status
127
128 def status(self, job_ids):
129 """Get the status of a list of jobs identified by their ids.
130
131 Parameters
132 ----------
133 job_ids : list of int
134 Identifiers of jobs for which the status will be returned.
135
136 Returns
137 -------
138 List of int
139 Status codes for the requested jobs.
140
141 """
142 self._status()
143 return [self.resources[jid]['status'] for jid in job_ids]
144
145 def submit(self, command, blocksize, tasks_per_node, job_name="parsl.auto"):
146 """Submits the command onto an Local Resource Manager job of blocksize parallel elements.
147
148 example file with the complex case of multiple submits per job:
149 Universe =vanilla
150 output = out.$(Cluster).$(Process)
151 error = err.$(Cluster).$(Process)
152 log = log.$(Cluster)
153 leave_in_queue = true
154 executable = test.sh
155 queue 5
156 executable = foo
157 queue 1
158
159 $ condor_submit test.sub
160 Submitting job(s)......
161 5 job(s) submitted to cluster 118907.
162 1 job(s) submitted to cluster 118908.
163
164 Parameters
165 ----------
166 command : str
167 Command to execute
168 blocksize : int
169 Number of blocks to request.
170 job_name : str
171 Job name prefix.
172 tasks_per_node : int
173 command invocations to be launched per node
174 Returns
175 -------
176 None or str
177 None if at capacity and cannot provision more; otherwise the identifier for the job.
178 """
179
180 logger.debug("Attempting to launch with blocksize: {}".format(blocksize))
181 if self.provisioned_blocks >= self.max_blocks:
182 template = "Provider {} is currently using {} blocks while max_blocks is {}; no blocks will be added"
183 logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))
184 return None
185
186 # Note: Fix this later to avoid confusing behavior.
187 # We should always allocate blocks in integer counts of node_granularity
188 blocksize = max(self.nodes_per_block, blocksize)
189
190 job_name = "parsl.{0}.{1}".format(job_name, time.time())
191
192 script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
193 script_path = os.path.abspath(script_path)
194 userscript_path = "{0}/{1}.script".format(self.script_dir, job_name)
195 userscript_path = os.path.abspath(userscript_path)
196
197 self.environment["JOBNAME"] = "'{}'".format(job_name)
198
199 job_config = {}
200 job_config["job_name"] = job_name
201 job_config["submit_script_dir"] = self.channel.script_dir
202 job_config["project"] = self.project
203 job_config["nodes"] = self.nodes_per_block
204 job_config["scheduler_options"] = self.scheduler_options
205 job_config["worker_init"] = self.worker_init
206 job_config["user_script"] = command
207 job_config["tasks_per_node"] = tasks_per_node
208 job_config["requirements"] = self.requirements
209 job_config["environment"] = ' '.join(['{}={}'.format(key, value) for key, value in self.environment.items()])
210
211 # Move the user script
212 # This is where the command should be wrapped by the launchers.
213 wrapped_command = self.launcher(command,
214 tasks_per_node,
215 self.nodes_per_block)
216
217 with open(userscript_path, 'w') as f:
218 f.write(job_config["worker_init"] + '\n' + wrapped_command)
219
220 user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)
221 the_input_files = [user_script_path] + self.transfer_input_files
222 job_config["input_files"] = ','.join(the_input_files)
223 job_config["job_script"] = os.path.basename(user_script_path)
224
225 # Construct and move the submit script
226 self._write_submit_script(template_string, script_path, job_name, job_config)
227 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
228
229 cmd = "condor_submit {0}".format(channel_script_path)
230 retcode, stdout, stderr = super().execute_wait(cmd, 30)
231 logger.debug("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
232
233 job_id = []
234
235 if retcode == 0:
236 for line in stdout.split('\n'):
237 if re.match('^[0-9]', line) is not None:
238 cluster = line.split(" ")[5]
239 # We know the first job id ("process" in condor terms) within a
240 # cluster is 0 and we know the total number of jobs from
241 # condor_submit, so we use some list comprehensions to expand
242 # the condor_submit output into job IDs
243 # e.g., ['118907.0', '118907.1', '118907.2', '118907.3', '118907.4', '118908.0']
244 processes = [str(x) for x in range(0, int(line[0]))]
245 job_id += [cluster + process for process in processes]
246
247 self._add_resource(job_id)
248 return job_id[0]
249
250 def cancel(self, job_ids):
251 """Cancels the jobs specified by a list of job IDs.
252
253 Parameters
254 ----------
255 job_ids : list of str
256 The job IDs to cancel.
257
258 Returns
259 -------
260 list of bool
261 Each entry in the list will be True if the job is cancelled succesfully, otherwise False.
262 """
263
264 job_id_list = ' '.join(job_ids)
265 cmd = "condor_rm {0}; condor_rm -forcex {0}".format(job_id_list)
266 logger.debug("Attempting removal of jobs : {0}".format(cmd))
267 retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)
268 rets = None
269 if retcode == 0:
270 for jid in job_ids:
271 self.resources[jid]['status'] = 'CANCELLED'
272 rets = [True for i in job_ids]
273 else:
274 rets = [False for i in job_ids]
275
276 return rets
277
278 @property
279 def scaling_enabled(self):
280 return True
281
282 @property
283 def current_capacity(self):
284 return self
285
286 def _add_resource(self, job_id):
287 for jid in job_id:
288 self.resources[jid] = {'status': 'PENDING', 'size': 1}
289 return True
290
291
292 if __name__ == "__main__":
293
294 print("None")
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/providers/condor/condor.py b/parsl/providers/condor/condor.py
--- a/parsl/providers/condor/condor.py
+++ b/parsl/providers/condor/condor.py
@@ -60,6 +60,8 @@
launcher : Launcher
Launcher for this provider. Possible launchers include
:class:`~parsl.launchers.SingleNodeLauncher` (the default),
+ cmd_timeout : int
+ Timeout for commands made to the scheduler in seconds
"""
def __init__(self,
channel=LocalChannel(),
@@ -75,7 +77,8 @@
walltime="00:10:00",
worker_init='',
launcher=SingleNodeLauncher(),
- requirements=''):
+ requirements='',
+ cmd_timeout=60):
label = 'condor'
super().__init__(label,
@@ -86,8 +89,8 @@
max_blocks,
parallelism,
walltime,
- launcher)
-
+ launcher,
+ cmd_timeout=cmd_timeout)
self.provisioned_blocks = 0
self.environment = environment if environment is not None else {}
@@ -227,7 +230,7 @@
channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
cmd = "condor_submit {0}".format(channel_script_path)
- retcode, stdout, stderr = super().execute_wait(cmd, 30)
+ retcode, stdout, stderr = super().execute_wait(cmd)
logger.debug("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
job_id = []
@@ -264,7 +267,7 @@
job_id_list = ' '.join(job_ids)
cmd = "condor_rm {0}; condor_rm -forcex {0}".format(job_id_list)
logger.debug("Attempting removal of jobs : {0}".format(cmd))
- retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)
+ retcode, stdout, stderr = super().execute_wait(cmd)
rets = None
if retcode == 0:
for jid in job_ids:
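An illustrative configuration sketch, not taken from the issue or the repository: with the patch above, `cmd_timeout` becomes a constructor argument forwarded to the scheduler commands in place of the old hard-coded 30 seconds. The executor choice, block counts, and the 600-second value are assumptions.

```python
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.providers import CondorProvider

config = Config(
    executors=[
        HighThroughputExecutor(
            label="htcondor",
            provider=CondorProvider(
                nodes_per_block=1,
                init_blocks=1,
                max_blocks=4,
                cmd_timeout=600,  # keyword introduced by the patch; previously fixed at 30 s
            ),
        )
    ]
)
```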
| {"golden_diff": "diff --git a/parsl/providers/condor/condor.py b/parsl/providers/condor/condor.py\n--- a/parsl/providers/condor/condor.py\n+++ b/parsl/providers/condor/condor.py\n@@ -60,6 +60,8 @@\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n+ cmd_timeout : int\n+ Timeout for commands made to the scheduler in seconds\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n@@ -75,7 +77,8 @@\n walltime=\"00:10:00\",\n worker_init='',\n launcher=SingleNodeLauncher(),\n- requirements=''):\n+ requirements='',\n+ cmd_timeout=60):\n \n label = 'condor'\n super().__init__(label,\n@@ -86,8 +89,8 @@\n max_blocks,\n parallelism,\n walltime,\n- launcher)\n-\n+ launcher,\n+ cmd_timeout=cmd_timeout)\n self.provisioned_blocks = 0\n \n self.environment = environment if environment is not None else {}\n@@ -227,7 +230,7 @@\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n \n cmd = \"condor_submit {0}\".format(channel_script_path)\n- retcode, stdout, stderr = super().execute_wait(cmd, 30)\n+ retcode, stdout, stderr = super().execute_wait(cmd)\n logger.debug(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n \n job_id = []\n@@ -264,7 +267,7 @@\n job_id_list = ' '.join(job_ids)\n cmd = \"condor_rm {0}; condor_rm -forcex {0}\".format(job_id_list)\n logger.debug(\"Attempting removal of jobs : {0}\".format(cmd))\n- retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)\n+ retcode, stdout, stderr = super().execute_wait(cmd)\n rets = None\n if retcode == 0:\n for jid in job_ids:\n", "issue": "Condor provider has a hard-coded `cmd_timeout`. \nThis timeout should be configurable and probably default to something much longer.\r\n\r\nhttps://github.com/Parsl/parsl/blob/1d8c9e35752274af6ae2ce2f07107474ece4561c/parsl/providers/condor/condor.py#L225\r\n\r\ncc @ZhuozhaoLi who noted this in a comment to #889 \n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.utils import RepresentationMixin\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.condor.template import template_string\nfrom parsl.providers.cluster_provider import ClusterProvider\n\nlogger = logging.getLogger(__name__)\n\n# See http://pages.cs.wisc.edu/~adesmet/status.html\ntranslate_table = {\n '1': 'PENDING',\n '2': 'RUNNING',\n '3': 'CANCELLED',\n '4': 'COMPLETED',\n '5': 'FAILED',\n '6': 'FAILED',\n}\n\n\nclass CondorProvider(RepresentationMixin, ClusterProvider):\n \"\"\"HTCondor Execution Provider.\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n nodes_per_block : int\n Nodes to provision per block.\n init_blocks : int\n Number of blocks to provision at time of initialization\n min_blocks : int\n Minimum number of blocks to maintain\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. 
A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n environment : dict of str\n A dictionary of environmant variable name and value pairs which will be set before\n running a task.\n project : str\n Project which the job will be charged against\n scheduler_options : str\n String to add specific condor attributes to the HTCondor submit script.\n transfer_input_files : list(str)\n List of strings of paths to additional files or directories to transfer to the job\n worker_init : str\n Command to be run before starting a worker.\n requirements : str\n Condor requirements.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n nodes_per_block=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=10,\n parallelism=1,\n environment=None,\n project='',\n scheduler_options='',\n transfer_input_files=[],\n walltime=\"00:10:00\",\n worker_init='',\n launcher=SingleNodeLauncher(),\n requirements=''):\n\n label = 'condor'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher)\n\n self.provisioned_blocks = 0\n\n self.environment = environment if environment is not None else {}\n for key, value in self.environment.items():\n # To escape literal quote marks, double them\n # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html\n try:\n self.environment[key] = \"'{}'\".format(value.replace(\"'\", '\"').replace('\"', '\"\"'))\n except AttributeError:\n pass\n\n self.project = project\n self.scheduler_options = scheduler_options\n self.worker_init = worker_init\n self.requirements = requirements\n self.transfer_input_files = transfer_input_files\n\n def _status(self):\n \"\"\"Update the resource dictionary with job statuses.\"\"\"\n\n job_id_list = ' '.join(self.resources.keys())\n cmd = \"condor_q {0} -af:jr JobStatus\".format(job_id_list)\n retcode, stdout, stderr = super().execute_wait(cmd)\n \"\"\"\n Example output:\n\n $ condor_q 34524642.0 34524643.0 -af:jr JobStatus\n 34524642.0 2\n 34524643.0 1\n \"\"\"\n\n for line in stdout.strip().split('\\n'):\n parts = line.split()\n job_id = parts[0]\n status = translate_table.get(parts[1], 'UNKNOWN')\n self.resources[job_id]['status'] = status\n\n def status(self, job_ids):\n \"\"\"Get the status of a list of jobs identified by their ids.\n\n Parameters\n ----------\n job_ids : list of int\n Identifiers of jobs for which the status will be returned.\n\n Returns\n -------\n List of int\n Status codes for the requested jobs.\n\n \"\"\"\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n def submit(self, command, blocksize, tasks_per_node, job_name=\"parsl.auto\"):\n \"\"\"Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n\n example file with the complex case of multiple submits per job:\n Universe =vanilla\n output = out.$(Cluster).$(Process)\n error = err.$(Cluster).$(Process)\n log = log.$(Cluster)\n leave_in_queue = true\n executable = test.sh\n queue 5\n executable = foo\n queue 1\n\n $ condor_submit test.sub\n Submitting job(s)......\n 5 job(s) submitted to cluster 118907.\n 1 job(s) submitted to cluster 118908.\n\n Parameters\n ----------\n command : str\n Command to execute\n 
blocksize : int\n Number of blocks to request.\n job_name : str\n Job name prefix.\n tasks_per_node : int\n command invocations to be launched per node\n Returns\n -------\n None or str\n None if at capacity and cannot provision more; otherwise the identifier for the job.\n \"\"\"\n\n logger.debug(\"Attempting to launch with blocksize: {}\".format(blocksize))\n if self.provisioned_blocks >= self.max_blocks:\n template = \"Provider {} is currently using {} blocks while max_blocks is {}; no blocks will be added\"\n logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))\n return None\n\n # Note: Fix this later to avoid confusing behavior.\n # We should always allocate blocks in integer counts of node_granularity\n blocksize = max(self.nodes_per_block, blocksize)\n\n job_name = \"parsl.{0}.{1}\".format(job_name, time.time())\n\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n userscript_path = \"{0}/{1}.script\".format(self.script_dir, job_name)\n userscript_path = os.path.abspath(userscript_path)\n\n self.environment[\"JOBNAME\"] = \"'{}'\".format(job_name)\n\n job_config = {}\n job_config[\"job_name\"] = job_name\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"project\"] = self.project\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"requirements\"] = self.requirements\n job_config[\"environment\"] = ' '.join(['{}={}'.format(key, value) for key, value in self.environment.items()])\n\n # Move the user script\n # This is where the command should be wrapped by the launchers.\n wrapped_command = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n with open(userscript_path, 'w') as f:\n f.write(job_config[\"worker_init\"] + '\\n' + wrapped_command)\n\n user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)\n the_input_files = [user_script_path] + self.transfer_input_files\n job_config[\"input_files\"] = ','.join(the_input_files)\n job_config[\"job_script\"] = os.path.basename(user_script_path)\n\n # Construct and move the submit script\n self._write_submit_script(template_string, script_path, job_name, job_config)\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n cmd = \"condor_submit {0}\".format(channel_script_path)\n retcode, stdout, stderr = super().execute_wait(cmd, 30)\n logger.debug(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n\n job_id = []\n\n if retcode == 0:\n for line in stdout.split('\\n'):\n if re.match('^[0-9]', line) is not None:\n cluster = line.split(\" \")[5]\n # We know the first job id (\"process\" in condor terms) within a\n # cluster is 0 and we know the total number of jobs from\n # condor_submit, so we use some list comprehensions to expand\n # the condor_submit output into job IDs\n # e.g., ['118907.0', '118907.1', '118907.2', '118907.3', '118907.4', '118908.0']\n processes = [str(x) for x in range(0, int(line[0]))]\n job_id += [cluster + process for process in processes]\n\n self._add_resource(job_id)\n return job_id[0]\n\n def cancel(self, job_ids):\n \"\"\"Cancels the jobs specified by a list of job IDs.\n\n Parameters\n ----------\n job_ids : list of str\n The job IDs to cancel.\n\n Returns\n -------\n list of bool\n Each 
entry in the list will be True if the job is cancelled succesfully, otherwise False.\n \"\"\"\n\n job_id_list = ' '.join(job_ids)\n cmd = \"condor_rm {0}; condor_rm -forcex {0}\".format(job_id_list)\n logger.debug(\"Attempting removal of jobs : {0}\".format(cmd))\n retcode, stdout, stderr = self.channel.execute_wait(cmd, 30)\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = 'CANCELLED'\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def scaling_enabled(self):\n return True\n\n @property\n def current_capacity(self):\n return self\n\n def _add_resource(self, job_id):\n for jid in job_id:\n self.resources[jid] = {'status': 'PENDING', 'size': 1}\n return True\n\n\nif __name__ == \"__main__\":\n\n print(\"None\")\n", "path": "parsl/providers/condor/condor.py"}], "after_files": [{"content": "import logging\nimport os\nimport re\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.utils import RepresentationMixin\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.condor.template import template_string\nfrom parsl.providers.cluster_provider import ClusterProvider\n\nlogger = logging.getLogger(__name__)\n\n# See http://pages.cs.wisc.edu/~adesmet/status.html\ntranslate_table = {\n '1': 'PENDING',\n '2': 'RUNNING',\n '3': 'CANCELLED',\n '4': 'COMPLETED',\n '5': 'FAILED',\n '6': 'FAILED',\n}\n\n\nclass CondorProvider(RepresentationMixin, ClusterProvider):\n \"\"\"HTCondor Execution Provider.\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n nodes_per_block : int\n Nodes to provision per block.\n init_blocks : int\n Number of blocks to provision at time of initialization\n min_blocks : int\n Minimum number of blocks to maintain\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n environment : dict of str\n A dictionary of environmant variable name and value pairs which will be set before\n running a task.\n project : str\n Project which the job will be charged against\n scheduler_options : str\n String to add specific condor attributes to the HTCondor submit script.\n transfer_input_files : list(str)\n List of strings of paths to additional files or directories to transfer to the job\n worker_init : str\n Command to be run before starting a worker.\n requirements : str\n Condor requirements.\n launcher : Launcher\n Launcher for this provider. 
Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n cmd_timeout : int\n Timeout for commands made to the scheduler in seconds\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n nodes_per_block=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=10,\n parallelism=1,\n environment=None,\n project='',\n scheduler_options='',\n transfer_input_files=[],\n walltime=\"00:10:00\",\n worker_init='',\n launcher=SingleNodeLauncher(),\n requirements='',\n cmd_timeout=60):\n\n label = 'condor'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n launcher,\n cmd_timeout=cmd_timeout)\n self.provisioned_blocks = 0\n\n self.environment = environment if environment is not None else {}\n for key, value in self.environment.items():\n # To escape literal quote marks, double them\n # See: http://research.cs.wisc.edu/htcondor/manual/v8.6/condor_submit.html\n try:\n self.environment[key] = \"'{}'\".format(value.replace(\"'\", '\"').replace('\"', '\"\"'))\n except AttributeError:\n pass\n\n self.project = project\n self.scheduler_options = scheduler_options\n self.worker_init = worker_init\n self.requirements = requirements\n self.transfer_input_files = transfer_input_files\n\n def _status(self):\n \"\"\"Update the resource dictionary with job statuses.\"\"\"\n\n job_id_list = ' '.join(self.resources.keys())\n cmd = \"condor_q {0} -af:jr JobStatus\".format(job_id_list)\n retcode, stdout, stderr = super().execute_wait(cmd)\n \"\"\"\n Example output:\n\n $ condor_q 34524642.0 34524643.0 -af:jr JobStatus\n 34524642.0 2\n 34524643.0 1\n \"\"\"\n\n for line in stdout.strip().split('\\n'):\n parts = line.split()\n job_id = parts[0]\n status = translate_table.get(parts[1], 'UNKNOWN')\n self.resources[job_id]['status'] = status\n\n def status(self, job_ids):\n \"\"\"Get the status of a list of jobs identified by their ids.\n\n Parameters\n ----------\n job_ids : list of int\n Identifiers of jobs for which the status will be returned.\n\n Returns\n -------\n List of int\n Status codes for the requested jobs.\n\n \"\"\"\n self._status()\n return [self.resources[jid]['status'] for jid in job_ids]\n\n def submit(self, command, blocksize, tasks_per_node, job_name=\"parsl.auto\"):\n \"\"\"Submits the command onto an Local Resource Manager job of blocksize parallel elements.\n\n example file with the complex case of multiple submits per job:\n Universe =vanilla\n output = out.$(Cluster).$(Process)\n error = err.$(Cluster).$(Process)\n log = log.$(Cluster)\n leave_in_queue = true\n executable = test.sh\n queue 5\n executable = foo\n queue 1\n\n $ condor_submit test.sub\n Submitting job(s)......\n 5 job(s) submitted to cluster 118907.\n 1 job(s) submitted to cluster 118908.\n\n Parameters\n ----------\n command : str\n Command to execute\n blocksize : int\n Number of blocks to request.\n job_name : str\n Job name prefix.\n tasks_per_node : int\n command invocations to be launched per node\n Returns\n -------\n None or str\n None if at capacity and cannot provision more; otherwise the identifier for the job.\n \"\"\"\n\n logger.debug(\"Attempting to launch with blocksize: {}\".format(blocksize))\n if self.provisioned_blocks >= self.max_blocks:\n template = \"Provider {} is currently using {} blocks while max_blocks is {}; no blocks will be added\"\n logger.warn(template.format(self.label, self.provisioned_blocks, self.max_blocks))\n return None\n\n # Note: Fix this later to avoid confusing behavior.\n # We should 
always allocate blocks in integer counts of node_granularity\n blocksize = max(self.nodes_per_block, blocksize)\n\n job_name = \"parsl.{0}.{1}\".format(job_name, time.time())\n\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n userscript_path = \"{0}/{1}.script\".format(self.script_dir, job_name)\n userscript_path = os.path.abspath(userscript_path)\n\n self.environment[\"JOBNAME\"] = \"'{}'\".format(job_name)\n\n job_config = {}\n job_config[\"job_name\"] = job_name\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"project\"] = self.project\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"requirements\"] = self.requirements\n job_config[\"environment\"] = ' '.join(['{}={}'.format(key, value) for key, value in self.environment.items()])\n\n # Move the user script\n # This is where the command should be wrapped by the launchers.\n wrapped_command = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n with open(userscript_path, 'w') as f:\n f.write(job_config[\"worker_init\"] + '\\n' + wrapped_command)\n\n user_script_path = self.channel.push_file(userscript_path, self.channel.script_dir)\n the_input_files = [user_script_path] + self.transfer_input_files\n job_config[\"input_files\"] = ','.join(the_input_files)\n job_config[\"job_script\"] = os.path.basename(user_script_path)\n\n # Construct and move the submit script\n self._write_submit_script(template_string, script_path, job_name, job_config)\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n cmd = \"condor_submit {0}\".format(channel_script_path)\n retcode, stdout, stderr = super().execute_wait(cmd)\n logger.debug(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n\n job_id = []\n\n if retcode == 0:\n for line in stdout.split('\\n'):\n if re.match('^[0-9]', line) is not None:\n cluster = line.split(\" \")[5]\n # We know the first job id (\"process\" in condor terms) within a\n # cluster is 0 and we know the total number of jobs from\n # condor_submit, so we use some list comprehensions to expand\n # the condor_submit output into job IDs\n # e.g., ['118907.0', '118907.1', '118907.2', '118907.3', '118907.4', '118908.0']\n processes = [str(x) for x in range(0, int(line[0]))]\n job_id += [cluster + process for process in processes]\n\n self._add_resource(job_id)\n return job_id[0]\n\n def cancel(self, job_ids):\n \"\"\"Cancels the jobs specified by a list of job IDs.\n\n Parameters\n ----------\n job_ids : list of str\n The job IDs to cancel.\n\n Returns\n -------\n list of bool\n Each entry in the list will be True if the job is cancelled succesfully, otherwise False.\n \"\"\"\n\n job_id_list = ' '.join(job_ids)\n cmd = \"condor_rm {0}; condor_rm -forcex {0}\".format(job_id_list)\n logger.debug(\"Attempting removal of jobs : {0}\".format(cmd))\n retcode, stdout, stderr = super().execute_wait(cmd)\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = 'CANCELLED'\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def scaling_enabled(self):\n return True\n\n @property\n def current_capacity(self):\n return self\n\n def _add_resource(self, job_id):\n for jid in job_id:\n 
self.resources[jid] = {'status': 'PENDING', 'size': 1}\n return True\n\n\nif __name__ == \"__main__\":\n\n print(\"None\")\n", "path": "parsl/providers/condor/condor.py"}]} | 3,584 | 494 |
gh_patches_debug_51313 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
filters.farid missing from skimage.filters documentation
## Description
The `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not it `__all__`? (No time to investigate right now.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/filters/__init__.py`
Content:
```
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import (gaussian, _guess_spatial_dimensions,
3 difference_of_gaussians)
4 from .edges import (sobel, sobel_h, sobel_v,
5 scharr, scharr_h, scharr_v,
6 prewitt, prewitt_h, prewitt_v,
7 roberts, roberts_pos_diag, roberts_neg_diag,
8 laplace,
9 farid, farid_h, farid_v)
10 from ._rank_order import rank_order
11 from ._gabor import gabor_kernel, gabor
12 from .thresholding import (threshold_local, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 threshold_multiotsu, try_all_threshold,
17 apply_hysteresis_threshold)
18 from .ridges import (meijering, sato, frangi, hessian)
19 from . import rank
20 from ._median import median
21 from ._sparse import correlate_sparse
22 from ._unsharp_mask import unsharp_mask
23 from ._window import window
24
25
26 __all__ = ['inverse',
27 'correlate_sparse',
28 'wiener',
29 'LPIFilter2D',
30 'gaussian',
31 'difference_of_gaussians',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'rank_order',
47 'gabor_kernel',
48 'gabor',
49 'try_all_threshold',
50 'meijering',
51 'sato',
52 'frangi',
53 'hessian',
54 'threshold_otsu',
55 'threshold_yen',
56 'threshold_isodata',
57 'threshold_li',
58 'threshold_local',
59 'threshold_minimum',
60 'threshold_mean',
61 'threshold_niblack',
62 'threshold_sauvola',
63 'threshold_triangle',
64 'threshold_multiotsu',
65 'apply_hysteresis_threshold',
66 'rank',
67 'unsharp_mask',
68 'window']
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,6 +43,9 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
+ 'farid',
+ 'farid_h',
+ 'farid_v',
'rank_order',
'gabor_kernel',
'gabor',
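A quick way to sanity-check the patch above (an illustrative snippet that assumes a locally built scikit-image with the change applied):
```python
import skimage.filters as filters

# After the patch, the Farid filters should appear in the public API listing.
assert {"farid", "farid_h", "farid_v"} <= set(filters.__all__)
print("farid, farid_h and farid_v are exported")
```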
| {"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,6 +43,9 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n+ 'farid',\n+ 'farid_h',\n+ 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "filters.farid missing from skimage.filters documentation\n## Description\r\n\r\nThe `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not it `__all__`? (No time to investigate right now.)\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . 
import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'farid',\n 'farid_h',\n 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}]} | 978 | 117 |
gh_patches_debug_43025 | rasdani/github-patches | git_diff | azavea__raster-vision-641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include per-scene metrics in eval.json
It would be useful to see metrics for each scene in addition to metrics averaged over all scenes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/evaluation/classification_evaluation.py`
Content:
```
1 from abc import (ABC, abstractmethod)
2
3 import json
4
5 from rastervision.evaluation import ClassEvaluationItem
6 from rastervision.utils.files import str_to_file
7
8
9 class ClassificationEvaluation(ABC):
10 """Base class for evaluating predictions for tasks that have classes.
11
12 Evaluations can be keyed, for instance, if evaluations happen per class.
13 """
14
15 def __init__(self):
16 self.clear()
17
18 def clear(self):
19 """Clear the Evaluation."""
20 self.class_to_eval_item = {}
21 self.avg_item = None
22
23 def set_class_to_eval_item(self, class_to_eval_item):
24 self.class_to_eval_item = class_to_eval_item
25
26 def get_by_id(self, key):
27 """Gets the evaluation for a particular EvaluationItem key"""
28 return self.class_to_eval_item[key]
29
30 def has_id(self, key):
31 """Answers whether or not the EvaluationItem key is represented"""
32 return key in self.class_to_eval_item
33
34 def to_json(self):
35 json_rep = []
36 for eval_item in self.class_to_eval_item.values():
37 json_rep.append(eval_item.to_json())
38 json_rep.append(self.avg_item.to_json())
39 return json_rep
40
41 def save(self, output_uri):
42 """Save this Evaluation to a file.
43
44 Args:
45 output_uri: string URI for the file to write.
46 """
47 json_str = json.dumps(self.to_json(), indent=4)
48 str_to_file(json_str, output_uri)
49
50 def merge(self, evaluation):
51 """Merge Evaluation for another Scene into this one.
52
53 This is useful for computing the average metrics of a set of scenes.
54 The results of the averaging are stored in this Evaluation.
55
56 Args:
57 evaluation: Evaluation to merge into this one
58 """
59 if len(self.class_to_eval_item) == 0:
60 self.class_to_eval_item = evaluation.class_to_eval_item
61 else:
62 for key, other_eval_item in \
63 evaluation.class_to_eval_item.items():
64 if self.has_id(key):
65 self.get_by_id(key).merge(other_eval_item)
66 else:
67 self.class_to_eval_item[key] = other_eval_item
68
69 self.compute_avg()
70
71 def compute_avg(self):
72 """Compute average metrics over all keys."""
73 self.avg_item = ClassEvaluationItem(class_name='average')
74 for eval_item in self.class_to_eval_item.values():
75 self.avg_item.merge(eval_item)
76
77 @abstractmethod
78 def compute(self, ground_truth_labels, prediction_labels):
79 """Compute metrics for a single scene.
80
81 Args:
82 ground_truth_labels: Ground Truth labels to evaluate against.
83 prediction_labels: The predicted labels to evaluate.
84 """
85 pass
86
```
Path: `rastervision/evaluation/semantic_segmentation_evaluator.py`
Content:
```
1 import logging
2
3 from rastervision.data import ActivateMixin
4 from rastervision.rv_config import RVConfig
5 from rastervision.utils.files import (download_if_needed)
6 from rastervision.evaluation import (ClassificationEvaluator,
7 SemanticSegmentationEvaluation)
8
9 log = logging.getLogger(__name__)
10
11
12 class SemanticSegmentationEvaluator(ClassificationEvaluator):
13 """Evaluates predictions for a set of scenes.
14 """
15
16 def __init__(self, class_map, output_uri):
17 super().__init__(class_map, output_uri)
18
19 def create_evaluation(self):
20 return SemanticSegmentationEvaluation(self.class_map)
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 if hasattr(label_source, 'source') and hasattr(
42 label_source.source, 'vector_source') and hasattr(
43 label_store, 'vector_output'):
44 tmp_dir = RVConfig.get_tmp_dir().name
45 gt_geojson = label_source.source.vector_source.get_geojson()
46 for vo in label_store.vector_output:
47 pred_geojson = vo['uri']
48 mode = vo['mode']
49 class_id = vo['class_id']
50 pred_geojson_local = download_if_needed(
51 pred_geojson, tmp_dir)
52 scene_evaluation = self.create_evaluation()
53 scene_evaluation.compute_vector(
54 gt_geojson, pred_geojson_local, mode, class_id)
55 evaluation.merge(scene_evaluation)
56
57 evaluation.save(self.output_uri)
58
```
Path: `rastervision/evaluation/classification_evaluator.py`
Content:
```
1 from abc import (abstractmethod)
2 import logging
3
4 from rastervision.evaluation import Evaluator
5 from rastervision.data import ActivateMixin
6
7 log = logging.getLogger(__name__)
8
9
10 class ClassificationEvaluator(Evaluator):
11 """Evaluates predictions for a set of scenes.
12 """
13
14 def __init__(self, class_map, output_uri):
15 self.class_map = class_map
16 self.output_uri = output_uri
17
18 @abstractmethod
19 def create_evaluation(self):
20 pass
21
22 def process(self, scenes, tmp_dir):
23 evaluation = self.create_evaluation()
24 for scene in scenes:
25 log.info('Computing evaluation for scene {}...'.format(scene.id))
26 label_source = scene.ground_truth_label_source
27 label_store = scene.prediction_label_store
28 with ActivateMixin.compose(label_source, label_store):
29 ground_truth = label_source.get_labels()
30 predictions = label_store.get_labels()
31
32 if scene.aoi_polygons:
33 # Filter labels based on AOI.
34 ground_truth = ground_truth.filter_by_aoi(
35 scene.aoi_polygons)
36 predictions = predictions.filter_by_aoi(scene.aoi_polygons)
37 scene_evaluation = self.create_evaluation()
38 scene_evaluation.compute(ground_truth, predictions)
39 evaluation.merge(scene_evaluation)
40
41 evaluation.save(self.output_uri)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py
--- a/rastervision/evaluation/classification_evaluation.py
+++ b/rastervision/evaluation/classification_evaluation.py
@@ -1,4 +1,5 @@
from abc import (ABC, abstractmethod)
+import copy
import json
@@ -18,6 +19,7 @@
def clear(self):
"""Clear the Evaluation."""
self.class_to_eval_item = {}
+ self.scene_to_eval = {}
self.avg_item = None
def set_class_to_eval_item(self, class_to_eval_item):
@@ -36,6 +38,14 @@
for eval_item in self.class_to_eval_item.values():
json_rep.append(eval_item.to_json())
json_rep.append(self.avg_item.to_json())
+
+ if self.scene_to_eval:
+ json_rep = {'overall': json_rep}
+ scene_to_eval_json = {}
+ for scene_id, eval in self.scene_to_eval.items():
+ scene_to_eval_json[scene_id] = eval.to_json()
+ json_rep['per_scene'] = scene_to_eval_json
+
return json_rep
def save(self, output_uri):
@@ -47,7 +57,7 @@
json_str = json.dumps(self.to_json(), indent=4)
str_to_file(json_str, output_uri)
- def merge(self, evaluation):
+ def merge(self, evaluation, scene_id=None):
"""Merge Evaluation for another Scene into this one.
This is useful for computing the average metrics of a set of scenes.
@@ -68,6 +78,9 @@
self.compute_avg()
+ if scene_id is not None:
+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)
+
def compute_avg(self):
"""Compute average metrics over all keys."""
self.avg_item = ClassEvaluationItem(class_name='average')
diff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py
--- a/rastervision/evaluation/classification_evaluator.py
+++ b/rastervision/evaluation/classification_evaluator.py
@@ -36,6 +36,5 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
-
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
diff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py
--- a/rastervision/evaluation/semantic_segmentation_evaluator.py
+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py
@@ -36,7 +36,7 @@
predictions = predictions.filter_by_aoi(scene.aoi_polygons)
scene_evaluation = self.create_evaluation()
scene_evaluation.compute(ground_truth, predictions)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
if hasattr(label_source, 'source') and hasattr(
label_source.source, 'vector_source') and hasattr(
@@ -52,6 +52,6 @@
scene_evaluation = self.create_evaluation()
scene_evaluation.compute_vector(
gt_geojson, pred_geojson_local, mode, class_id)
- evaluation.merge(scene_evaluation)
+ evaluation.merge(scene_evaluation, scene_id=scene.id)
evaluation.save(self.output_uri)
| {"golden_diff": "diff --git a/rastervision/evaluation/classification_evaluation.py b/rastervision/evaluation/classification_evaluation.py\n--- a/rastervision/evaluation/classification_evaluation.py\n+++ b/rastervision/evaluation/classification_evaluation.py\n@@ -1,4 +1,5 @@\n from abc import (ABC, abstractmethod)\n+import copy\n \n import json\n \n@@ -18,6 +19,7 @@\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n+ self.scene_to_eval = {}\n self.avg_item = None\n \n def set_class_to_eval_item(self, class_to_eval_item):\n@@ -36,6 +38,14 @@\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n+\n+ if self.scene_to_eval:\n+ json_rep = {'overall': json_rep}\n+ scene_to_eval_json = {}\n+ for scene_id, eval in self.scene_to_eval.items():\n+ scene_to_eval_json[scene_id] = eval.to_json()\n+ json_rep['per_scene'] = scene_to_eval_json\n+\n return json_rep\n \n def save(self, output_uri):\n@@ -47,7 +57,7 @@\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n \n- def merge(self, evaluation):\n+ def merge(self, evaluation, scene_id=None):\n \"\"\"Merge Evaluation for another Scene into this one.\n \n This is useful for computing the average metrics of a set of scenes.\n@@ -68,6 +78,9 @@\n \n self.compute_avg()\n \n+ if scene_id is not None:\n+ self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)\n+\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\ndiff --git a/rastervision/evaluation/classification_evaluator.py b/rastervision/evaluation/classification_evaluator.py\n--- a/rastervision/evaluation/classification_evaluator.py\n+++ b/rastervision/evaluation/classification_evaluator.py\n@@ -36,6 +36,5 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n-\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n evaluation.save(self.output_uri)\ndiff --git a/rastervision/evaluation/semantic_segmentation_evaluator.py b/rastervision/evaluation/semantic_segmentation_evaluator.py\n--- a/rastervision/evaluation/semantic_segmentation_evaluator.py\n+++ b/rastervision/evaluation/semantic_segmentation_evaluator.py\n@@ -36,7 +36,7 @@\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n@@ -52,6 +52,6 @@\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n- evaluation.merge(scene_evaluation)\n+ evaluation.merge(scene_evaluation, scene_id=scene.id)\n \n evaluation.save(self.output_uri)\n", "issue": "Include per-scene metrics in eval.json\nIt would be useful to see metrics for each scene in addition to metrics averaged over all scenes. 
\n", "before_files": [{"content": "from abc import (ABC, abstractmethod)\n\nimport json\n\nfrom rastervision.evaluation import ClassEvaluationItem\nfrom rastervision.utils.files import str_to_file\n\n\nclass ClassificationEvaluation(ABC):\n \"\"\"Base class for evaluating predictions for tasks that have classes.\n\n Evaluations can be keyed, for instance, if evaluations happen per class.\n \"\"\"\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n self.avg_item = None\n\n def set_class_to_eval_item(self, class_to_eval_item):\n self.class_to_eval_item = class_to_eval_item\n\n def get_by_id(self, key):\n \"\"\"Gets the evaluation for a particular EvaluationItem key\"\"\"\n return self.class_to_eval_item[key]\n\n def has_id(self, key):\n \"\"\"Answers whether or not the EvaluationItem key is represented\"\"\"\n return key in self.class_to_eval_item\n\n def to_json(self):\n json_rep = []\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n return json_rep\n\n def save(self, output_uri):\n \"\"\"Save this Evaluation to a file.\n\n Args:\n output_uri: string URI for the file to write.\n \"\"\"\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n\n def merge(self, evaluation):\n \"\"\"Merge Evaluation for another Scene into this one.\n\n This is useful for computing the average metrics of a set of scenes.\n The results of the averaging are stored in this Evaluation.\n\n Args:\n evaluation: Evaluation to merge into this one\n \"\"\"\n if len(self.class_to_eval_item) == 0:\n self.class_to_eval_item = evaluation.class_to_eval_item\n else:\n for key, other_eval_item in \\\n evaluation.class_to_eval_item.items():\n if self.has_id(key):\n self.get_by_id(key).merge(other_eval_item)\n else:\n self.class_to_eval_item[key] = other_eval_item\n\n self.compute_avg()\n\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\n for eval_item in self.class_to_eval_item.values():\n self.avg_item.merge(eval_item)\n\n @abstractmethod\n def compute(self, ground_truth_labels, prediction_labels):\n \"\"\"Compute metrics for a single scene.\n\n Args:\n ground_truth_labels: Ground Truth labels to evaluate against.\n prediction_labels: The predicted labels to evaluate.\n \"\"\"\n pass\n", "path": "rastervision/evaluation/classification_evaluation.py"}, {"content": "import logging\n\nfrom rastervision.data import ActivateMixin\nfrom rastervision.rv_config import RVConfig\nfrom rastervision.utils.files import (download_if_needed)\nfrom rastervision.evaluation import (ClassificationEvaluator,\n SemanticSegmentationEvaluation)\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationEvaluator(ClassificationEvaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n super().__init__(class_map, output_uri)\n\n def create_evaluation(self):\n return SemanticSegmentationEvaluation(self.class_map)\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = 
label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n label_store, 'vector_output'):\n tmp_dir = RVConfig.get_tmp_dir().name\n gt_geojson = label_source.source.vector_source.get_geojson()\n for vo in label_store.vector_output:\n pred_geojson = vo['uri']\n mode = vo['mode']\n class_id = vo['class_id']\n pred_geojson_local = download_if_needed(\n pred_geojson, tmp_dir)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/semantic_segmentation_evaluator.py"}, {"content": "from abc import (abstractmethod)\nimport logging\n\nfrom rastervision.evaluation import Evaluator\nfrom rastervision.data import ActivateMixin\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassificationEvaluator(Evaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n self.class_map = class_map\n self.output_uri = output_uri\n\n @abstractmethod\n def create_evaluation(self):\n pass\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/classification_evaluator.py"}], "after_files": [{"content": "from abc import (ABC, abstractmethod)\nimport copy\n\nimport json\n\nfrom rastervision.evaluation import ClassEvaluationItem\nfrom rastervision.utils.files import str_to_file\n\n\nclass ClassificationEvaluation(ABC):\n \"\"\"Base class for evaluating predictions for tasks that have classes.\n\n Evaluations can be keyed, for instance, if evaluations happen per class.\n \"\"\"\n\n def __init__(self):\n self.clear()\n\n def clear(self):\n \"\"\"Clear the Evaluation.\"\"\"\n self.class_to_eval_item = {}\n self.scene_to_eval = {}\n self.avg_item = None\n\n def set_class_to_eval_item(self, class_to_eval_item):\n self.class_to_eval_item = class_to_eval_item\n\n def get_by_id(self, key):\n \"\"\"Gets the evaluation for a particular EvaluationItem key\"\"\"\n return self.class_to_eval_item[key]\n\n def has_id(self, key):\n \"\"\"Answers whether or not the EvaluationItem key is represented\"\"\"\n return key in self.class_to_eval_item\n\n def to_json(self):\n json_rep = []\n for eval_item in self.class_to_eval_item.values():\n json_rep.append(eval_item.to_json())\n json_rep.append(self.avg_item.to_json())\n\n if self.scene_to_eval:\n json_rep = {'overall': 
json_rep}\n scene_to_eval_json = {}\n for scene_id, eval in self.scene_to_eval.items():\n scene_to_eval_json[scene_id] = eval.to_json()\n json_rep['per_scene'] = scene_to_eval_json\n\n return json_rep\n\n def save(self, output_uri):\n \"\"\"Save this Evaluation to a file.\n\n Args:\n output_uri: string URI for the file to write.\n \"\"\"\n json_str = json.dumps(self.to_json(), indent=4)\n str_to_file(json_str, output_uri)\n\n def merge(self, evaluation, scene_id=None):\n \"\"\"Merge Evaluation for another Scene into this one.\n\n This is useful for computing the average metrics of a set of scenes.\n The results of the averaging are stored in this Evaluation.\n\n Args:\n evaluation: Evaluation to merge into this one\n \"\"\"\n if len(self.class_to_eval_item) == 0:\n self.class_to_eval_item = evaluation.class_to_eval_item\n else:\n for key, other_eval_item in \\\n evaluation.class_to_eval_item.items():\n if self.has_id(key):\n self.get_by_id(key).merge(other_eval_item)\n else:\n self.class_to_eval_item[key] = other_eval_item\n\n self.compute_avg()\n\n if scene_id is not None:\n self.scene_to_eval[scene_id] = copy.deepcopy(evaluation)\n\n def compute_avg(self):\n \"\"\"Compute average metrics over all keys.\"\"\"\n self.avg_item = ClassEvaluationItem(class_name='average')\n for eval_item in self.class_to_eval_item.values():\n self.avg_item.merge(eval_item)\n\n @abstractmethod\n def compute(self, ground_truth_labels, prediction_labels):\n \"\"\"Compute metrics for a single scene.\n\n Args:\n ground_truth_labels: Ground Truth labels to evaluate against.\n prediction_labels: The predicted labels to evaluate.\n \"\"\"\n pass\n", "path": "rastervision/evaluation/classification_evaluation.py"}, {"content": "import logging\n\nfrom rastervision.data import ActivateMixin\nfrom rastervision.rv_config import RVConfig\nfrom rastervision.utils.files import (download_if_needed)\nfrom rastervision.evaluation import (ClassificationEvaluator,\n SemanticSegmentationEvaluation)\n\nlog = logging.getLogger(__name__)\n\n\nclass SemanticSegmentationEvaluator(ClassificationEvaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n super().__init__(class_map, output_uri)\n\n def create_evaluation(self):\n return SemanticSegmentationEvaluation(self.class_map)\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n\n if hasattr(label_source, 'source') and hasattr(\n label_source.source, 'vector_source') and hasattr(\n label_store, 'vector_output'):\n tmp_dir = RVConfig.get_tmp_dir().name\n gt_geojson = label_source.source.vector_source.get_geojson()\n for vo in label_store.vector_output:\n pred_geojson = vo['uri']\n mode = vo['mode']\n class_id = vo['class_id']\n pred_geojson_local = download_if_needed(\n pred_geojson, tmp_dir)\n scene_evaluation = self.create_evaluation()\n 
scene_evaluation.compute_vector(\n gt_geojson, pred_geojson_local, mode, class_id)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/semantic_segmentation_evaluator.py"}, {"content": "from abc import (abstractmethod)\nimport logging\n\nfrom rastervision.evaluation import Evaluator\nfrom rastervision.data import ActivateMixin\n\nlog = logging.getLogger(__name__)\n\n\nclass ClassificationEvaluator(Evaluator):\n \"\"\"Evaluates predictions for a set of scenes.\n \"\"\"\n\n def __init__(self, class_map, output_uri):\n self.class_map = class_map\n self.output_uri = output_uri\n\n @abstractmethod\n def create_evaluation(self):\n pass\n\n def process(self, scenes, tmp_dir):\n evaluation = self.create_evaluation()\n for scene in scenes:\n log.info('Computing evaluation for scene {}...'.format(scene.id))\n label_source = scene.ground_truth_label_source\n label_store = scene.prediction_label_store\n with ActivateMixin.compose(label_source, label_store):\n ground_truth = label_source.get_labels()\n predictions = label_store.get_labels()\n\n if scene.aoi_polygons:\n # Filter labels based on AOI.\n ground_truth = ground_truth.filter_by_aoi(\n scene.aoi_polygons)\n predictions = predictions.filter_by_aoi(scene.aoi_polygons)\n scene_evaluation = self.create_evaluation()\n scene_evaluation.compute(ground_truth, predictions)\n evaluation.merge(scene_evaluation, scene_id=scene.id)\n evaluation.save(self.output_uri)\n", "path": "rastervision/evaluation/classification_evaluator.py"}]} | 1,990 | 778 |
gh_patches_debug_23538 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The first worker may crash in ALLREDUCE mode
When the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.
Then consensus_init_kwars will set "known_addr_list" as empty.
This will cause an error in ftlib.
```
[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Setting Bind Address as 11.233.87.89
log file: /tmp/memberlist.log
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...
[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus
[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/elasticdl/elasticdl/python/worker/main.py", line 76, in <module>
main()
File "/elasticdl/elasticdl/python/worker/main.py", line 70, in main
set_parallelism=True,
File "/elasticdl/elasticdl/python/worker/worker.py", line 122, in __init__
self._init_from_args(args)
File "/elasticdl/elasticdl/python/worker/worker.py", line 159, in _init_from_args
if self._distribution_strategy == DistributionStrategy.ALLREDUCE
File "/elasticdl/elasticdl/python/collective_ops/communicator.py", line 39, in __init__
known_addr_list=list(self._get_peer_set(service_name))
File "/usr/local/lib/python3.6/dist-packages/ftlib/impl.py", line 137, in manual_join
return self.consensus.manual_join(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 85, in manual_join
self.joined = self._join(known_addr_list, wait_time=wait_time)
File "/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py", line 92, in _join
assert addr_list_len >= 1
AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/collective_ops/communicator.py`
Content:
```
1 import socket
2
3 from elasticdl.python.common.constants import CollectiveCommunicatorStatus
4 from elasticdl.python.common.log_utils import default_logger as logger
5
6 try:
7 from ftlib import BasicFTLib
8 from ftlib.ftlib_status import FTAllReduceStatus
9
10 _FTLIB_INSTALLED = True
11 except ImportError:
12 BasicFTLib = object
13 FTAllReduceStatus = object
14 _FTLIB_INSTALLED = False
15
16
17 _SUPPORTED_ALLREDUCE_OPS = ["MEAN"]
18 _FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (
19 "FTLib is not installed. Default to succeeded for testing purposes"
20 )
21
22
23 class CollectiveCommunicator(object):
24 def __init__(self, service_name=None):
25 if _FTLIB_INSTALLED:
26 self._ftlib = BasicFTLib(
27 consensus="gossip",
28 commlib="pytorch",
29 consensus_init_kwargs={
30 "known_addr_list": list(self._get_peer_set(service_name)),
31 "custom_bind_addr": socket.gethostbyname(
32 socket.gethostname()
33 ),
34 },
35 )
36 while not self._ftlib.consensus_joined():
37 logger.warning("Retry building consensus...")
38 self._ftlib.manual_join(
39 known_addr_list=list(self._get_peer_set(service_name))
40 )
41 else:
42 logger.warning(
43 "FTLib is not installed. The CollectiveCommunicator "
44 "may not work as expected"
45 )
46 self._ftlib = None
47
48 def allreduce(self, data, op="MEAN"):
49 if data is None:
50 logger.error("Data is required for allreduce operation")
51 return CollectiveCommunicatorStatus.FAILED, data
52 if op not in _SUPPORTED_ALLREDUCE_OPS:
53 logger.error(
54 "%s is not in list of supported allreduce operations: %s"
55 % (op, _SUPPORTED_ALLREDUCE_OPS)
56 )
57 return CollectiveCommunicatorStatus.FAILED, data
58 if self._ftlib is not None:
59 res = self._ftlib.wait_gradients_ready(data)
60 if res == FTAllReduceStatus.SUCCESS:
61 return CollectiveCommunicatorStatus.SUCCEEDED, data
62 else:
63 return CollectiveCommunicatorStatus.FAILED, data
64 else:
65 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
66 return CollectiveCommunicatorStatus.SUCCEEDED, data
67
68 def broadcast(self, data, src_rank):
69 if self._ftlib is not None:
70 res = self._ftlib.broadcast(data, src_rank)
71 if res == FTAllReduceStatus.SUCCESS:
72 return CollectiveCommunicatorStatus.SUCCEEDED, data
73 else:
74 return CollectiveCommunicatorStatus.FAILED, data
75 else:
76 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
77 return CollectiveCommunicatorStatus.SUCCEEDED, data
78
79 def barrier(self):
80 if self._ftlib is not None:
81 res = self._ftlib.barrier()
82 if res == FTAllReduceStatus.SUCCESS:
83 return CollectiveCommunicatorStatus.SUCCEEDED
84 else:
85 return CollectiveCommunicatorStatus.FAILED
86 else:
87 logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)
88 return CollectiveCommunicatorStatus.SUCCEEDED
89
90 def is_initialized(self):
91 """This will be `False` under three occasions:
92 * New workers report joining in
93 * Collective-communication operations fail or time out
94 * Liveness probe fails for existing workers
95 """
96 if self._ftlib is not None:
97 return self._ftlib.initialized
98 else:
99 return True
100
101 def _get_peer_set(self, svc_name):
102 if svc_name is None:
103 return None
104 my_ip = socket.gethostbyname(socket.gethostname())
105 temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)
106 peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}
107 return peer_set
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py
--- a/elasticdl/python/collective_ops/communicator.py
+++ b/elasticdl/python/collective_ops/communicator.py
@@ -23,17 +23,18 @@
class CollectiveCommunicator(object):
def __init__(self, service_name=None):
if _FTLIB_INSTALLED:
+ peer_list = list(self._get_peer_set(service_name))
self._ftlib = BasicFTLib(
consensus="gossip",
commlib="pytorch",
consensus_init_kwargs={
- "known_addr_list": list(self._get_peer_set(service_name)),
+ "known_addr_list": peer_list,
"custom_bind_addr": socket.gethostbyname(
socket.gethostname()
),
},
)
- while not self._ftlib.consensus_joined():
+ while peer_list and not self._ftlib.consensus_joined():
logger.warning("Retry building consensus...")
self._ftlib.manual_join(
known_addr_list=list(self._get_peer_set(service_name))
| {"golden_diff": "diff --git a/elasticdl/python/collective_ops/communicator.py b/elasticdl/python/collective_ops/communicator.py\n--- a/elasticdl/python/collective_ops/communicator.py\n+++ b/elasticdl/python/collective_ops/communicator.py\n@@ -23,17 +23,18 @@\n class CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n+ peer_list = list(self._get_peer_set(service_name))\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n- \"known_addr_list\": list(self._get_peer_set(service_name)),\n+ \"known_addr_list\": peer_list,\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n- while not self._ftlib.consensus_joined():\n+ while peer_list and not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n", "issue": "The first worker may crash in ALLREDUCE mode \nWhen the worker is the only running worker pod, `_get_peer_set` will get an empty peer set.\r\nThen consensus_init_kwars will set \"known_addr_list\" as empty.\r\nThis will cause an error in ftlib.\r\n\r\n\r\n```\r\n[2020-03-30 06:16:07,202] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:09,206] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nSetting Bind Address as 11.233.87.89\r\nlog file: /tmp/memberlist.log\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:37:__init__] Retry building consensus...\r\n[2020-03-30 06:16:21,713] [WARNING] [communicator.py:102:_get_peer_set] svc_name is edl-allreduce-haitao-ftlib-consensus\r\n[2020-03-30 06:16:21,714] [WARNING] [communicator.py:108:_get_peer_set] peer_set is set()\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 76, in <module>\r\n main()\r\n File \"/elasticdl/elasticdl/python/worker/main.py\", line 70, in main\r\n set_parallelism=True,\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 122, in __init__\r\n self._init_from_args(args)\r\n File \"/elasticdl/elasticdl/python/worker/worker.py\", line 159, in _init_from_args\r\n if self._distribution_strategy == DistributionStrategy.ALLREDUCE\r\n File \"/elasticdl/elasticdl/python/collective_ops/communicator.py\", line 39, in __init__\r\n known_addr_list=list(self._get_peer_set(service_name))\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/impl.py\", line 137, in manual_join\r\n return self.consensus.manual_join(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 85, in manual_join\r\n self.joined = self._join(known_addr_list, wait_time=wait_time)\r\n File \"/usr/local/lib/python3.6/dist-packages/ftlib/consensus/gossip/impl.py\", line 92, in _join\r\n assert addr_list_len >= 1\r\nAssertionError\r\n```\n", "before_files": [{"content": "import socket\n\nfrom elasticdl.python.common.constants import CollectiveCommunicatorStatus\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\ntry:\n from ftlib import BasicFTLib\n from ftlib.ftlib_status import FTAllReduceStatus\n\n _FTLIB_INSTALLED = True\nexcept ImportError:\n BasicFTLib = object\n FTAllReduceStatus = object\n _FTLIB_INSTALLED = 
False\n\n\n_SUPPORTED_ALLREDUCE_OPS = [\"MEAN\"]\n_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (\n \"FTLib is not installed. Default to succeeded for testing purposes\"\n)\n\n\nclass CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n \"known_addr_list\": list(self._get_peer_set(service_name)),\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n while not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n )\n else:\n logger.warning(\n \"FTLib is not installed. The CollectiveCommunicator \"\n \"may not work as expected\"\n )\n self._ftlib = None\n\n def allreduce(self, data, op=\"MEAN\"):\n if data is None:\n logger.error(\"Data is required for allreduce operation\")\n return CollectiveCommunicatorStatus.FAILED, data\n if op not in _SUPPORTED_ALLREDUCE_OPS:\n logger.error(\n \"%s is not in list of supported allreduce operations: %s\"\n % (op, _SUPPORTED_ALLREDUCE_OPS)\n )\n return CollectiveCommunicatorStatus.FAILED, data\n if self._ftlib is not None:\n res = self._ftlib.wait_gradients_ready(data)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def broadcast(self, data, src_rank):\n if self._ftlib is not None:\n res = self._ftlib.broadcast(data, src_rank)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def barrier(self):\n if self._ftlib is not None:\n res = self._ftlib.barrier()\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED\n else:\n return CollectiveCommunicatorStatus.FAILED\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED\n\n def is_initialized(self):\n \"\"\"This will be `False` under three occasions:\n * New workers report joining in\n * Collective-communication operations fail or time out\n * Liveness probe fails for existing workers\n \"\"\"\n if self._ftlib is not None:\n return self._ftlib.initialized\n else:\n return True\n\n def _get_peer_set(self, svc_name):\n if svc_name is None:\n return None\n my_ip = socket.gethostbyname(socket.gethostname())\n temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)\n peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}\n return peer_set\n", "path": "elasticdl/python/collective_ops/communicator.py"}], "after_files": [{"content": "import socket\n\nfrom elasticdl.python.common.constants import CollectiveCommunicatorStatus\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\ntry:\n from ftlib import BasicFTLib\n from ftlib.ftlib_status import FTAllReduceStatus\n\n _FTLIB_INSTALLED = True\nexcept ImportError:\n BasicFTLib = object\n FTAllReduceStatus = object\n _FTLIB_INSTALLED = False\n\n\n_SUPPORTED_ALLREDUCE_OPS = [\"MEAN\"]\n_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE = (\n \"FTLib is not installed. 
Default to succeeded for testing purposes\"\n)\n\n\nclass CollectiveCommunicator(object):\n def __init__(self, service_name=None):\n if _FTLIB_INSTALLED:\n peer_list = list(self._get_peer_set(service_name))\n self._ftlib = BasicFTLib(\n consensus=\"gossip\",\n commlib=\"pytorch\",\n consensus_init_kwargs={\n \"known_addr_list\": peer_list,\n \"custom_bind_addr\": socket.gethostbyname(\n socket.gethostname()\n ),\n },\n )\n while peer_list and not self._ftlib.consensus_joined():\n logger.warning(\"Retry building consensus...\")\n self._ftlib.manual_join(\n known_addr_list=list(self._get_peer_set(service_name))\n )\n else:\n logger.warning(\n \"FTLib is not installed. The CollectiveCommunicator \"\n \"may not work as expected\"\n )\n self._ftlib = None\n\n def allreduce(self, data, op=\"MEAN\"):\n if data is None:\n logger.error(\"Data is required for allreduce operation\")\n return CollectiveCommunicatorStatus.FAILED, data\n if op not in _SUPPORTED_ALLREDUCE_OPS:\n logger.error(\n \"%s is not in list of supported allreduce operations: %s\"\n % (op, _SUPPORTED_ALLREDUCE_OPS)\n )\n return CollectiveCommunicatorStatus.FAILED, data\n if self._ftlib is not None:\n res = self._ftlib.wait_gradients_ready(data)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def broadcast(self, data, src_rank):\n if self._ftlib is not None:\n res = self._ftlib.broadcast(data, src_rank)\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n else:\n return CollectiveCommunicatorStatus.FAILED, data\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED, data\n\n def barrier(self):\n if self._ftlib is not None:\n res = self._ftlib.barrier()\n if res == FTAllReduceStatus.SUCCESS:\n return CollectiveCommunicatorStatus.SUCCEEDED\n else:\n return CollectiveCommunicatorStatus.FAILED\n else:\n logger.warning(_FTLIB_UNINSTALLED_DEFAULT_STATUS_MESSAGE)\n return CollectiveCommunicatorStatus.SUCCEEDED\n\n def is_initialized(self):\n \"\"\"This will be `False` under three occasions:\n * New workers report joining in\n * Collective-communication operations fail or time out\n * Liveness probe fails for existing workers\n \"\"\"\n if self._ftlib is not None:\n return self._ftlib.initialized\n else:\n return True\n\n def _get_peer_set(self, svc_name):\n if svc_name is None:\n return None\n my_ip = socket.gethostbyname(socket.gethostname())\n temp_set = socket.getaddrinfo(svc_name, 0, proto=socket.IPPROTO_TCP)\n peer_set = {peer[-1][0] for peer in temp_set if peer[-1][0] != my_ip}\n return peer_set\n", "path": "elasticdl/python/collective_ops/communicator.py"}]} | 2,041 | 248 |
gh_patches_debug_53980 | rasdani/github-patches | git_diff | scikit-hep__pyhf-2135 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Matplotlib broken in Pyodide demo in docs
In the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:
```pytb
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[1], line 3
1 import piplite
2 await piplite.install(["pyhf==0.7.0"])
----> 3 get_ipython().run_line_magic('matplotlib', 'inline')
4 import pyhf
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)
2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)
2368 with self.builtin_trap:
-> 2369 result = fn(*args, **kwargs)
2371 # The code below prevents the output from being displayed
2372 # when using magics with decodator @output_can_be_silenced
2373 # when the last Python token in the expression is a ';'.
2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
File /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)
97 print("Available matplotlib backends: %s" % backends_list)
98 else:
---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
100 self._show_matplotlib_backend(args.gui, backend)
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)
3519 def enable_matplotlib(self, gui=None):
3520 """Enable interactive matplotlib and inline figure support.
3521
3522 This takes the following steps:
(...)
3538 display figures inline.
3539 """
-> 3540 from matplotlib_inline.backend_inline import configure_inline_support
3542 from IPython.core import pylabtools as pt
3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
File /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1
----> 1 from . import backend_inline, config # noqa
2 __version__ = "0.1.6" # noqa
File /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6
1 """A matplotlib backend for publishing figures via display_data"""
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the BSD 3-Clause License.
----> 6 import matplotlib
7 from matplotlib import colors
8 from matplotlib.backends import backend_agg
ModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.
You can install it by calling:
await micropip.install("matplotlib") in Python, or
await pyodide.loadPackage("matplotlib") in JavaScript
See https://pyodide.org/en/stable/usage/loading-packages.html for more details.
```
It used to work previously, though I can not say for sure when it last worked for me. Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/generate_jupyterlite_iframe.py`
Content:
```
1 import urllib.parse
2
3
4 def main():
5 code = """\
6 import piplite
7 await piplite.install(["pyhf==0.7.0"])
8 %matplotlib inline
9 import pyhf\
10 """
11
12 parsed_url = urllib.parse.quote(code)
13 url_base = "https://jupyterlite.github.io/demo/repl/index.html"
14 jupyterlite_options = "?kernel=python&toolbar=1&code="
15 jupyterlite_url = url_base + jupyterlite_options + parsed_url
16
17 print(f"# jupyterlite URL:\n{jupyterlite_url}")
18
19 jupyterlite_iframe_rst = f"""\
20 <iframe
21 src="{jupyterlite_url}"
22 width="100%"
23 height="500px"
24 ></iframe>\
25 """
26 print(f"\n# RST for iframe for jupyterlite.rst:\n{jupyterlite_iframe_rst}")
27
28
29 if __name__ == "__main__":
30 raise SystemExit(main())
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py
--- a/docs/generate_jupyterlite_iframe.py
+++ b/docs/generate_jupyterlite_iframe.py
@@ -4,7 +4,7 @@
def main():
code = """\
import piplite
-await piplite.install(["pyhf==0.7.0"])
+await piplite.install(["pyhf==0.7.0", "matplotlib>=3.0.0"])
%matplotlib inline
import pyhf\
"""
| {"golden_diff": "diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py\n--- a/docs/generate_jupyterlite_iframe.py\n+++ b/docs/generate_jupyterlite_iframe.py\n@@ -4,7 +4,7 @@\n def main():\n code = \"\"\"\\\n import piplite\n-await piplite.install([\"pyhf==0.7.0\"])\n+await piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n %matplotlib inline\n import pyhf\\\n \"\"\"\n", "issue": "Matplotlib broken in Pyodide demo in docs\nIn the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:\r\n```pytb\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\nCell In[1], line 3\r\n 1 import piplite\r\n 2 await piplite.install([\"pyhf==0.7.0\"])\r\n----> 3 get_ipython().run_line_magic('matplotlib', 'inline')\r\n 4 import pyhf\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)\r\n 2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)\r\n 2368 with self.builtin_trap:\r\n-> 2369 result = fn(*args, **kwargs)\r\n 2371 # The code below prevents the output from being displayed\r\n 2372 # when using magics with decodator @output_can_be_silenced\r\n 2373 # when the last Python token in the expression is a ';'.\r\n 2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)\r\n 97 print(\"Available matplotlib backends: %s\" % backends_list)\r\n 98 else:\r\n---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)\r\n 100 self._show_matplotlib_backend(args.gui, backend)\r\n\r\nFile /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)\r\n 3519 def enable_matplotlib(self, gui=None):\r\n 3520 \"\"\"Enable interactive matplotlib and inline figure support.\r\n 3521 \r\n 3522 This takes the following steps:\r\n (...)\r\n 3538 display figures inline.\r\n 3539 \"\"\"\r\n-> 3540 from matplotlib_inline.backend_inline import configure_inline_support\r\n 3542 from IPython.core import pylabtools as pt\r\n 3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1\r\n----> 1 from . import backend_inline, config # noqa\r\n 2 __version__ = \"0.1.6\" # noqa\r\n\r\nFile /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6\r\n 1 \"\"\"A matplotlib backend for publishing figures via display_data\"\"\"\r\n 3 # Copyright (c) IPython Development Team.\r\n 4 # Distributed under the terms of the BSD 3-Clause License.\r\n----> 6 import matplotlib\r\n 7 from matplotlib import colors\r\n 8 from matplotlib.backends import backend_agg\r\n\r\nModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.\r\nYou can install it by calling:\r\n await micropip.install(\"matplotlib\") in Python, or\r\n await pyodide.loadPackage(\"matplotlib\") in JavaScript\r\nSee https://pyodide.org/en/stable/usage/loading-packages.html for more details.\r\n```\r\nIt used to work previously, though I can not say for sure when it last worked for me. 
Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.\n", "before_files": [{"content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py"}], "after_files": [{"content": "import urllib.parse\n\n\ndef main():\n code = \"\"\"\\\nimport piplite\nawait piplite.install([\"pyhf==0.7.0\", \"matplotlib>=3.0.0\"])\n%matplotlib inline\nimport pyhf\\\n\"\"\"\n\n parsed_url = urllib.parse.quote(code)\n url_base = \"https://jupyterlite.github.io/demo/repl/index.html\"\n jupyterlite_options = \"?kernel=python&toolbar=1&code=\"\n jupyterlite_url = url_base + jupyterlite_options + parsed_url\n\n print(f\"# jupyterlite URL:\\n{jupyterlite_url}\")\n\n jupyterlite_iframe_rst = f\"\"\"\\\n <iframe\n src=\"{jupyterlite_url}\"\n width=\"100%\"\n height=\"500px\"\n ></iframe>\\\n\"\"\"\n print(f\"\\n# RST for iframe for jupyterlite.rst:\\n{jupyterlite_iframe_rst}\")\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "docs/generate_jupyterlite_iframe.py"}]} | 1,352 | 120 |
gh_patches_debug_22217 | rasdani/github-patches | git_diff | OCA__bank-payment-107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test fails with Odoo, not OCB
https://travis-ci.org/OCA/bank-payment/builds/47806067
File "/home/travis/build/OCA/bank-payment/account_direct_debit/models/account_invoice.py", line 140, in __ init __
invoice_obj._columns['state'].selection.append(
KeyError: 'state'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `account_direct_debit/models/account_invoice.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 ##############################################################################
3 #
4 # Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).
5 #
6 # All other contributions are (C) by their respective contributors
7 #
8 # All Rights Reserved
9 #
10 # This program is free software: you can redistribute it and/or modify
11 # it under the terms of the GNU Affero General Public License as
12 # published by the Free Software Foundation, either version 3 of the
13 # License, or (at your option) any later version.
14 #
15 # This program is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU Affero General Public License for more details.
19 #
20 # You should have received a copy of the GNU Affero General Public License
21 # along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #
23 ##############################################################################
24
25 """
26 This module adds support for Direct debit orders as applicable
27 in the Netherlands. Debit orders are advanced in total by the bank.
28 Amounts that cannot be debited or are canceled by account owners are
29 credited afterwards. Such a creditation is called a storno.
30
31 Invoice workflow:
32
33 1 the sale leads to
34 1300 Debtors 100
35 8000 Sales 100
36
37 Balance:
38 Debtors 2000 |
39 Sales | 2000
40
41 2 an external booking takes place
42 1100 Bank 100
43 1300 Debtors 100
44 This booking is reconciled with [1]
45 The invoice gets set to state 'paid', and 'reconciled' = True
46
47 Balance:
48 Debtors 1900 |
49 Bank 100 |
50 Sales | 2000
51
52 This module implements the following diversion:
53
54 2a the invoice is included in a direct debit order. When the order is
55 confirmed, a move is created per invoice:
56
57 2000 Transfer account 100 |
58 1300 Debtors | 100
59 Reconciliation takes place between 1 and 2a.
60 The invoice gets set to state 'paid', and 'reconciled' = True
61
62 Balance:
63 Debtors 0 |
64 Transfer account 2000 |
65 Bank 0 |
66 Sales | 2000
67
68 3a the direct debit order is booked on the bank account
69
70 Balance:
71 1100 Bank 2000 |
72 2000 Transfer account | 2000
73 Reconciliation takes place between 3a and 2a
74
75 Balance:
76 Debtors 0 |
77 Transfer account 0 |
78 Bank 2000 |
79 Sales | 2000
80
81 4 a storno from invoice [1] triggers a new booking on the bank account
82 1300 Debtors 100 |
83 1100 Bank | 100
84
85 Balance:
86 Debtors 100 |
87 Transfer account 0 |
88 Bank 1900 |
89 Sales | 2000
90
91 The reconciliation of 2a is undone. The booking of 2a is reconciled
92 with the booking of 4 instead.
93 The payment line attribute 'storno' is set to True and the invoice
94 state is no longer 'paid'.
95
96 Two cases need to be distinguisted:
97 1) If the storno is a manual storno from the partner, the invoice is set to
98 state 'debit_denied', with 'reconciled' = False
99 This module implements this option by allowing the bank module to call
100
101 netsvc.LocalService("workflow").trg_validate(
102 uid, 'account.invoice', ids, 'debit_denied', cr)
103
104 2) If the storno is an error generated by the bank (assumingly non-fatal),
105 the invoice is reopened for the next debit run. This is a call to
106 existing
107
108 netsvc.LocalService("workflow").trg_validate(
109 uid, 'account.invoice', ids, 'open_test', cr)
110
111 Should also be adding a log entry on the invoice for tracing purposes
112
113 self._log_event(cr, uid, ids, -1.0, 'Debit denied')
114
115 If not for that funny comment
116 "#TODO: implement messages system" in account/invoice.py
117
118 Repeating non-fatal fatal errors need to be dealt with manually by checking
119 open invoices with a matured invoice- or due date.
120 """
121
122 from openerp.osv import orm
123 from openerp.tools.translate import _
124
125
126 class AccountInvoice(orm.Model):
127 _inherit = "account.invoice"
128
129 def __init__(self, pool, cr):
130 """
131 Adding a state to the hardcoded state list of the inherited
132 model. The alternative is duplicating the field definition
133 in columns but only one module can do that!
134
135 Maybe apply a similar trick when overriding the buttons' 'states'
136 attributes in the form view, manipulating the xml in fields_view_get().
137 """
138 super(AccountInvoice, self).__init__(pool, cr)
139 invoice_obj = pool.get('account.invoice')
140 invoice_obj._columns['state'].selection.append(
141 ('debit_denied', 'Debit denied'))
142
143 def action_debit_denied(self, cr, uid, ids, context=None):
144 for invoice_id in ids:
145 if self.test_paid(cr, uid, [invoice_id], context):
146 number = self.read(
147 cr, uid, invoice_id, ['number'], context=context)['number']
148 raise orm.except_orm(
149 _('Error !'),
150 _("You cannot set invoice '%s' to state 'debit "
151 "denied', as it is still reconciled.") % number)
152 self.write(cr, uid, ids, {'state': 'debit_denied'}, context=context)
153 for inv_id, name in self.name_get(cr, uid, ids, context=context):
154 message = _("Invoice '%s': direct debit is denied.") % name
155 self.log(cr, uid, inv_id, message)
156 return True
157
158 def test_undo_debit_denied(self, cr, uid, ids, context=None):
159 """
160 Called from the workflow. Used to unset paid state on
161 invoices that were paid with bank transfers which are being cancelled
162 """
163 for invoice in self.read(cr, uid, ids, ['reconciled'], context):
164 if not invoice['reconciled']:
165 return False
166 return True
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/account_direct_debit/models/account_invoice.py b/account_direct_debit/models/account_invoice.py
--- a/account_direct_debit/models/account_invoice.py
+++ b/account_direct_debit/models/account_invoice.py
@@ -126,7 +126,7 @@
class AccountInvoice(orm.Model):
_inherit = "account.invoice"
- def __init__(self, pool, cr):
+ def _register_hook(self, cr):
"""
Adding a state to the hardcoded state list of the inherited
model. The alternative is duplicating the field definition
@@ -135,9 +135,7 @@
Maybe apply a similar trick when overriding the buttons' 'states'
attributes in the form view, manipulating the xml in fields_view_get().
"""
- super(AccountInvoice, self).__init__(pool, cr)
- invoice_obj = pool.get('account.invoice')
- invoice_obj._columns['state'].selection.append(
+ self._columns['state'].selection.append(
('debit_denied', 'Debit denied'))
def action_debit_denied(self, cr, uid, ids, context=None):
| {"golden_diff": "diff --git a/account_direct_debit/models/account_invoice.py b/account_direct_debit/models/account_invoice.py\n--- a/account_direct_debit/models/account_invoice.py\n+++ b/account_direct_debit/models/account_invoice.py\n@@ -126,7 +126,7 @@\n class AccountInvoice(orm.Model):\n _inherit = \"account.invoice\"\n \n- def __init__(self, pool, cr):\n+ def _register_hook(self, cr):\n \"\"\"\n Adding a state to the hardcoded state list of the inherited\n model. The alternative is duplicating the field definition\n@@ -135,9 +135,7 @@\n Maybe apply a similar trick when overriding the buttons' 'states'\n attributes in the form view, manipulating the xml in fields_view_get().\n \"\"\"\n- super(AccountInvoice, self).__init__(pool, cr)\n- invoice_obj = pool.get('account.invoice')\n- invoice_obj._columns['state'].selection.append(\n+ self._columns['state'].selection.append(\n ('debit_denied', 'Debit denied'))\n \n def action_debit_denied(self, cr, uid, ids, context=None):\n", "issue": "Test fails with Odoo, not OCB\nhttps://travis-ci.org/OCA/bank-payment/builds/47806067\n\nFile \"/home/travis/build/OCA/bank-payment/account_direct_debit/models/account_invoice.py\", line 140, in __ init __\ninvoice_obj._columns['state'].selection.append(\nKeyError: 'state'\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).\n#\n# All other contributions are (C) by their respective contributors\n#\n# All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\n\"\"\"\nThis module adds support for Direct debit orders as applicable\nin the Netherlands. Debit orders are advanced in total by the bank.\nAmounts that cannot be debited or are canceled by account owners are\ncredited afterwards. Such a creditation is called a storno.\n\nInvoice workflow:\n\n1 the sale leads to\n 1300 Debtors 100\n 8000 Sales 100\n\nBalance:\n Debtors 2000 |\n Sales | 2000\n\n2 an external booking takes place\n 1100 Bank 100\n 1300 Debtors 100\n This booking is reconciled with [1]\n The invoice gets set to state 'paid', and 'reconciled' = True\n\nBalance:\n Debtors 1900 |\n Bank 100 |\n Sales | 2000\n\nThis module implements the following diversion:\n\n2a the invoice is included in a direct debit order. 
When the order is\n confirmed, a move is created per invoice:\n\n 2000 Transfer account 100 |\n 1300 Debtors | 100\n Reconciliation takes place between 1 and 2a.\n The invoice gets set to state 'paid', and 'reconciled' = True\n\nBalance:\n Debtors 0 |\n Transfer account 2000 |\n Bank 0 |\n Sales | 2000\n\n3a the direct debit order is booked on the bank account\n\nBalance:\n 1100 Bank 2000 |\n 2000 Transfer account | 2000\n Reconciliation takes place between 3a and 2a\n\nBalance:\n Debtors 0 |\n Transfer account 0 |\n Bank 2000 |\n Sales | 2000\n\n4 a storno from invoice [1] triggers a new booking on the bank account\n 1300 Debtors 100 |\n 1100 Bank | 100\n\nBalance:\n Debtors 100 |\n Transfer account 0 |\n Bank 1900 |\n Sales | 2000\n\n The reconciliation of 2a is undone. The booking of 2a is reconciled\n with the booking of 4 instead.\n The payment line attribute 'storno' is set to True and the invoice\n state is no longer 'paid'.\n\nTwo cases need to be distinguisted:\n 1) If the storno is a manual storno from the partner, the invoice is set to\n state 'debit_denied', with 'reconciled' = False\n This module implements this option by allowing the bank module to call\n\n netsvc.LocalService(\"workflow\").trg_validate(\n uid, 'account.invoice', ids, 'debit_denied', cr)\n\n 2) If the storno is an error generated by the bank (assumingly non-fatal),\n the invoice is reopened for the next debit run. This is a call to\n existing\n\n netsvc.LocalService(\"workflow\").trg_validate(\n uid, 'account.invoice', ids, 'open_test', cr)\n\n Should also be adding a log entry on the invoice for tracing purposes\n\n self._log_event(cr, uid, ids, -1.0, 'Debit denied')\n\n If not for that funny comment\n \"#TODO: implement messages system\" in account/invoice.py\n\n Repeating non-fatal fatal errors need to be dealt with manually by checking\n open invoices with a matured invoice- or due date.\n\"\"\"\n\nfrom openerp.osv import orm\nfrom openerp.tools.translate import _\n\n\nclass AccountInvoice(orm.Model):\n _inherit = \"account.invoice\"\n\n def __init__(self, pool, cr):\n \"\"\"\n Adding a state to the hardcoded state list of the inherited\n model. The alternative is duplicating the field definition\n in columns but only one module can do that!\n\n Maybe apply a similar trick when overriding the buttons' 'states'\n attributes in the form view, manipulating the xml in fields_view_get().\n \"\"\"\n super(AccountInvoice, self).__init__(pool, cr)\n invoice_obj = pool.get('account.invoice')\n invoice_obj._columns['state'].selection.append(\n ('debit_denied', 'Debit denied'))\n\n def action_debit_denied(self, cr, uid, ids, context=None):\n for invoice_id in ids:\n if self.test_paid(cr, uid, [invoice_id], context):\n number = self.read(\n cr, uid, invoice_id, ['number'], context=context)['number']\n raise orm.except_orm(\n _('Error !'),\n _(\"You cannot set invoice '%s' to state 'debit \"\n \"denied', as it is still reconciled.\") % number)\n self.write(cr, uid, ids, {'state': 'debit_denied'}, context=context)\n for inv_id, name in self.name_get(cr, uid, ids, context=context):\n message = _(\"Invoice '%s': direct debit is denied.\") % name\n self.log(cr, uid, inv_id, message)\n return True\n\n def test_undo_debit_denied(self, cr, uid, ids, context=None):\n \"\"\"\n Called from the workflow. 
Used to unset paid state on\n invoices that were paid with bank transfers which are being cancelled\n \"\"\"\n for invoice in self.read(cr, uid, ids, ['reconciled'], context):\n if not invoice['reconciled']:\n return False\n return True\n", "path": "account_direct_debit/models/account_invoice.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).\n#\n# All other contributions are (C) by their respective contributors\n#\n# All Rights Reserved\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\n\"\"\"\nThis module adds support for Direct debit orders as applicable\nin the Netherlands. Debit orders are advanced in total by the bank.\nAmounts that cannot be debited or are canceled by account owners are\ncredited afterwards. Such a creditation is called a storno.\n\nInvoice workflow:\n\n1 the sale leads to\n 1300 Debtors 100\n 8000 Sales 100\n\nBalance:\n Debtors 2000 |\n Sales | 2000\n\n2 an external booking takes place\n 1100 Bank 100\n 1300 Debtors 100\n This booking is reconciled with [1]\n The invoice gets set to state 'paid', and 'reconciled' = True\n\nBalance:\n Debtors 1900 |\n Bank 100 |\n Sales | 2000\n\nThis module implements the following diversion:\n\n2a the invoice is included in a direct debit order. When the order is\n confirmed, a move is created per invoice:\n\n 2000 Transfer account 100 |\n 1300 Debtors | 100\n Reconciliation takes place between 1 and 2a.\n The invoice gets set to state 'paid', and 'reconciled' = True\n\nBalance:\n Debtors 0 |\n Transfer account 2000 |\n Bank 0 |\n Sales | 2000\n\n3a the direct debit order is booked on the bank account\n\nBalance:\n 1100 Bank 2000 |\n 2000 Transfer account | 2000\n Reconciliation takes place between 3a and 2a\n\nBalance:\n Debtors 0 |\n Transfer account 0 |\n Bank 2000 |\n Sales | 2000\n\n4 a storno from invoice [1] triggers a new booking on the bank account\n 1300 Debtors 100 |\n 1100 Bank | 100\n\nBalance:\n Debtors 100 |\n Transfer account 0 |\n Bank 1900 |\n Sales | 2000\n\n The reconciliation of 2a is undone. The booking of 2a is reconciled\n with the booking of 4 instead.\n The payment line attribute 'storno' is set to True and the invoice\n state is no longer 'paid'.\n\nTwo cases need to be distinguisted:\n 1) If the storno is a manual storno from the partner, the invoice is set to\n state 'debit_denied', with 'reconciled' = False\n This module implements this option by allowing the bank module to call\n\n netsvc.LocalService(\"workflow\").trg_validate(\n uid, 'account.invoice', ids, 'debit_denied', cr)\n\n 2) If the storno is an error generated by the bank (assumingly non-fatal),\n the invoice is reopened for the next debit run. 
This is a call to\n existing\n\n netsvc.LocalService(\"workflow\").trg_validate(\n uid, 'account.invoice', ids, 'open_test', cr)\n\n Should also be adding a log entry on the invoice for tracing purposes\n\n self._log_event(cr, uid, ids, -1.0, 'Debit denied')\n\n If not for that funny comment\n \"#TODO: implement messages system\" in account/invoice.py\n\n Repeating non-fatal fatal errors need to be dealt with manually by checking\n open invoices with a matured invoice- or due date.\n\"\"\"\n\nfrom openerp.osv import orm\nfrom openerp.tools.translate import _\n\n\nclass AccountInvoice(orm.Model):\n _inherit = \"account.invoice\"\n\n def _register_hook(self, cr):\n \"\"\"\n Adding a state to the hardcoded state list of the inherited\n model. The alternative is duplicating the field definition\n in columns but only one module can do that!\n\n Maybe apply a similar trick when overriding the buttons' 'states'\n attributes in the form view, manipulating the xml in fields_view_get().\n \"\"\"\n self._columns['state'].selection.append(\n ('debit_denied', 'Debit denied'))\n\n def action_debit_denied(self, cr, uid, ids, context=None):\n for invoice_id in ids:\n if self.test_paid(cr, uid, [invoice_id], context):\n number = self.read(\n cr, uid, invoice_id, ['number'], context=context)['number']\n raise orm.except_orm(\n _('Error !'),\n _(\"You cannot set invoice '%s' to state 'debit \"\n \"denied', as it is still reconciled.\") % number)\n self.write(cr, uid, ids, {'state': 'debit_denied'}, context=context)\n for inv_id, name in self.name_get(cr, uid, ids, context=context):\n message = _(\"Invoice '%s': direct debit is denied.\") % name\n self.log(cr, uid, inv_id, message)\n return True\n\n def test_undo_debit_denied(self, cr, uid, ids, context=None):\n \"\"\"\n Called from the workflow. Used to unset paid state on\n invoices that were paid with bank transfers which are being cancelled\n \"\"\"\n for invoice in self.read(cr, uid, ids, ['reconciled'], context):\n if not invoice['reconciled']:\n return False\n return True\n", "path": "account_direct_debit/models/account_invoice.py"}]} | 2,216 | 246 |
gh_patches_debug_20110 | rasdani/github-patches | git_diff | pytorch__ignite-2639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Code formatting issue with latest flake8
https://github.com/pytorch/ignite/runs/7781175697?check_suite_focus=true#step:11:84
```
Collecting flake8
Downloading flake8-5.0.4-py2.py3-none-any.whl (61 kB)
+ flake8 ignite tests examples --config setup.cfg
ignite/metrics/psnr.py:12:121: E501 line too long (121 > 120 characters)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/psnr.py`
Content:
```
1 from typing import Callable, Sequence, Union
2
3 import torch
4
5 from ignite.exceptions import NotComputableError
6 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
7
8 __all__ = ["PSNR"]
9
10
11 class PSNR(Metric):
12 r"""Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
13
14 .. math::
15 \text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
16
17 where :math:`\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
18
19 - `y_pred` and `y` **must** have (batch_size, ...) shape.
20 - `y_pred` and `y` **must** have same dtype and same shape.
21
22 Args:
23 data_range: The data range of the target image (distance between minimum
24 and maximum possible values).
25 For other data types, please set the data range, otherwise an exception will be raised.
26 output_transform: A callable that is used to transform the Engine’s
27 process_function’s output into the form expected by the metric.
28 device: specifies which device updates are accumulated on.
29 Setting the metric’s device to be the same as your update arguments ensures
30 the update method is non-blocking. By default, CPU.
31
32 Examples:
33 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
34 The output of the engine's ``process_function`` needs to be in format of
35 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
36
37 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
38
39 .. include:: defaults.rst
40 :start-after: :orphan:
41
42 .. testcode::
43
44 psnr = PSNR(data_range=1.0)
45 psnr.attach(default_evaluator, 'psnr')
46 preds = torch.rand([4, 3, 16, 16])
47 target = preds * 0.75
48 state = default_evaluator.run([[preds, target]])
49 print(state.metrics['psnr'])
50
51 .. testoutput::
52
53 16.8671405...
54
55 This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only
56 Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,
57
58 .. testcode::
59
60 def get_y_channel(output):
61 y_pred, y = output
62 # y_pred and y are (B, 3, H, W) and YCbCr or YUV images
63 # let's select y channel
64 return y_pred[:, 0, ...], y[:, 0, ...]
65
66 psnr = PSNR(data_range=219, output_transform=get_y_channel)
67 psnr.attach(default_evaluator, 'psnr')
68 preds = 219 * torch.rand([4, 3, 16, 16])
69 target = preds * 0.75
70 state = default_evaluator.run([[preds, target]])
71 print(state.metrics['psnr'])
72
73 .. testoutput::
74
75 16.7027966...
76
77 .. versionadded:: 0.4.3
78 """
79
80 def __init__(
81 self,
82 data_range: Union[int, float],
83 output_transform: Callable = lambda x: x,
84 device: Union[str, torch.device] = torch.device("cpu"),
85 ):
86 super().__init__(output_transform=output_transform, device=device)
87 self.data_range = data_range
88
89 def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:
90 y_pred, y = output
91 if y_pred.dtype != y.dtype:
92 raise TypeError(
93 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
94 )
95
96 if y_pred.shape != y.shape:
97 raise ValueError(
98 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
99 )
100
101 @reinit__is_reduced
102 def reset(self) -> None:
103 self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)
104 self._num_examples = 0
105
106 @reinit__is_reduced
107 def update(self, output: Sequence[torch.Tensor]) -> None:
108 self._check_shape_dtype(output)
109 y_pred, y = output[0].detach(), output[1].detach()
110
111 dim = tuple(range(1, y.ndim))
112 mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)
113 self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(
114 device=self._device
115 )
116 self._num_examples += y.shape[0]
117
118 @sync_all_reduce("_sum_of_batchwise_psnr", "_num_examples")
119 def compute(self) -> torch.Tensor:
120 if self._num_examples == 0:
121 raise NotComputableError("PSNR must have at least one example before it can be computed.")
122 return self._sum_of_batchwise_psnr / self._num_examples
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/metrics/psnr.py b/ignite/metrics/psnr.py
--- a/ignite/metrics/psnr.py
+++ b/ignite/metrics/psnr.py
@@ -9,7 +9,8 @@
class PSNR(Metric):
- r"""Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
+ r"""Computes average
+ `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
\text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
@@ -34,7 +35,8 @@
The output of the engine's ``process_function`` needs to be in format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
- For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
+ For more information on how metric works with :class:`~ignite.engine.engine.Engine`,
+ visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
| {"golden_diff": "diff --git a/ignite/metrics/psnr.py b/ignite/metrics/psnr.py\n--- a/ignite/metrics/psnr.py\n+++ b/ignite/metrics/psnr.py\n@@ -9,7 +9,8 @@\n \n \n class PSNR(Metric):\n- r\"\"\"Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n+ r\"\"\"Computes average\n+ `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n \n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n@@ -34,7 +35,8 @@\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n \n- For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n+ For more information on how metric works with :class:`~ignite.engine.engine.Engine`,\n+ visit :ref:`attach-engine`.\n \n .. include:: defaults.rst\n :start-after: :orphan:\n", "issue": "Code formatting issue with latest flake8\n\r\nhttps://github.com/pytorch/ignite/runs/7781175697?check_suite_focus=true#step:11:84\r\n\r\n```\r\nCollecting flake8\r\n Downloading flake8-5.0.4-py2.py3-none-any.whl (61 kB)\r\n\r\n+ flake8 ignite tests examples --config setup.cfg\r\nignite/metrics/psnr.py:12:121: E501 line too long (121 > 120 characters)\r\n```\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"PSNR\"]\n\n\nclass PSNR(Metric):\n r\"\"\"Computes average `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n\n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n\n where :math:`\\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.\n\n - `y_pred` and `y` **must** have (batch_size, ...) shape.\n - `y_pred` and `y` **must** have same dtype and same shape.\n\n Args:\n data_range: The data range of the target image (distance between minimum\n and maximum possible values).\n For other data types, please set the data range, otherwise an exception will be raised.\n output_transform: A callable that is used to transform the Engine\u2019s\n process_function\u2019s output into the form expected by the metric.\n device: specifies which device updates are accumulated on.\n Setting the metric\u2019s device to be the same as your update arguments ensures\n the update method is non-blocking. By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n psnr = PSNR(data_range=1.0)\n psnr.attach(default_evaluator, 'psnr')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.8671405...\n\n This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only\n Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,\n\n .. 
testcode::\n\n def get_y_channel(output):\n y_pred, y = output\n # y_pred and y are (B, 3, H, W) and YCbCr or YUV images\n # let's select y channel\n return y_pred[:, 0, ...], y[:, 0, ...]\n\n psnr = PSNR(data_range=219, output_transform=get_y_channel)\n psnr.attach(default_evaluator, 'psnr')\n preds = 219 * torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.7027966...\n\n .. versionadded:: 0.4.3\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super().__init__(output_transform=output_transform, device=device)\n self.data_range = data_range\n\n def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n self._check_shape_dtype(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n dim = tuple(range(1, y.ndim))\n mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)\n self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(\n device=self._device\n )\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_psnr\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"PSNR must have at least one example before it can be computed.\")\n return self._sum_of_batchwise_psnr / self._num_examples\n", "path": "ignite/metrics/psnr.py"}], "after_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"PSNR\"]\n\n\nclass PSNR(Metric):\n r\"\"\"Computes average\n `Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.\n\n .. math::\n \\text{PSNR}(I, J) = 10 * \\log_{10}\\left(\\frac{ MAX_{I}^2 }{ \\text{ MSE } }\\right)\n\n where :math:`\\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.\n\n - `y_pred` and `y` **must** have (batch_size, ...) shape.\n - `y_pred` and `y` **must** have same dtype and same shape.\n\n Args:\n data_range: The data range of the target image (distance between minimum\n and maximum possible values).\n For other data types, please set the data range, otherwise an exception will be raised.\n output_transform: A callable that is used to transform the Engine\u2019s\n process_function\u2019s output into the form expected by the metric.\n device: specifies which device updates are accumulated on.\n Setting the metric\u2019s device to be the same as your update arguments ensures\n the update method is non-blocking. 
By default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`,\n visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n psnr = PSNR(data_range=1.0)\n psnr.attach(default_evaluator, 'psnr')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.8671405...\n\n This metric by default accepts Grayscale or RGB images. But if you have YCbCr or YUV images, only\n Y channel is needed for computing PSNR. And, this can be done with ``output_transform``. For instance,\n\n .. testcode::\n\n def get_y_channel(output):\n y_pred, y = output\n # y_pred and y are (B, 3, H, W) and YCbCr or YUV images\n # let's select y channel\n return y_pred[:, 0, ...], y[:, 0, ...]\n\n psnr = PSNR(data_range=219, output_transform=get_y_channel)\n psnr.attach(default_evaluator, 'psnr')\n preds = 219 * torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['psnr'])\n\n .. testoutput::\n\n 16.7027966...\n\n .. versionadded:: 0.4.3\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super().__init__(output_transform=output_transform, device=device)\n self.data_range = data_range\n\n def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n self._check_shape_dtype(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n dim = tuple(range(1, y.ndim))\n mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)\n self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range ** 2 / (mse_error + 1e-10))).to(\n device=self._device\n )\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_psnr\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"PSNR must have at least one example before it can be computed.\")\n return self._sum_of_batchwise_psnr / self._num_examples\n", "path": "ignite/metrics/psnr.py"}]} | 1,906 | 305 |
gh_patches_debug_14337 | rasdani/github-patches | git_diff | horovod__horovod-3830 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spark tests fail with AttributeError: 'pyarrow.lib.Schema' object has no attribute 'to_arrow_schema'
Spark tests fail with
```
Traceback (most recent call last):
File "/horovod/examples/spark/pytorch/pytorch_spark_mnist.py", line 122, in <module>
torch_model = torch_estimator.fit(train_df).setOutputCols(['label_prob'])
File "/usr/local/lib/python3.8/dist-packages/horovod/spark/common/estimator.py", line 35, in fit
return super(HorovodEstimator, self).fit(df, params)
File "/usr/local/lib/python3.8/dist-packages/pyspark/ml/base.py", line 205, in fit
return self._fit(dataset)
File "/usr/local/lib/python3.8/dist-packages/horovod/spark/common/estimator.py", line 68, in _fit
with util.prepare_data(backend.num_processes(),
File "/usr/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py", line 735, in prepare_data
dataset_idx = _get_or_create_dataset(key, store, df, feature_columns, label_columns,
File "/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py", line 672, in _get_or_create_dataset
train_rows, val_rows, pq_metadata, avg_row_size = get_simple_meta_from_parquet(
File "/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py", line 495, in get_simple_meta_from_parquet
train_data_schema = train_data.schema.to_arrow_schema()
AttributeError: 'pyarrow.lib.Schema' object has no attribute 'to_arrow_schema'
```
https://github.com/horovod/horovod/actions/runs/4025636527/jobs/6919132641
It's probably related to the 11.0 release of pyarrow. https://pypi.org/project/pyarrow/#history
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
2 # Modifications copyright Microsoft
3 # Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # ==============================================================================
17
18 import atexit
19 import io
20 import os
21 import re
22 import shutil
23 import subprocess
24 import sys
25 import tempfile
26 import textwrap
27
28 from setuptools import setup, Extension, find_packages
29 from setuptools.command.build_ext import build_ext
30
31 from horovod import __version__
32
33 _FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'
34
35 class CMakeExtension(Extension):
36 def __init__(self, name, cmake_lists_dir='.', sources=None, **kwa):
37 if sources is None:
38 sources = []
39 Extension.__init__(self, name, sources=sources, **kwa)
40 self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)
41
42
43 tensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',
44 cmake_lists_dir='.', sources=[])
45 torch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',
46 cmake_lists_dir='.', sources=[])
47 mxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',
48 cmake_lists_dir='.', sources=[])
49
50 def is_build_action():
51 if len(sys.argv) <= 1:
52 return False
53
54 if sys.argv[1].startswith('build'):
55 return True
56
57 if sys.argv[1].startswith('bdist'):
58 return True
59
60 if sys.argv[1].startswith('install'):
61 return True
62
63 if sys.argv[1].startswith('develop'):
64 return True
65
66 def get_cmake_bin():
67 from packaging import version
68
69 if 'HOROVOD_CMAKE' in os.environ:
70 return os.environ['HOROVOD_CMAKE']
71
72 cmake_bin = 'cmake'
73 try:
74 out = subprocess.check_output([cmake_bin, '--version'])
75 except OSError:
76 cmake_installed_version = version.parse("0.0")
77 else:
78 cmake_installed_version = version.parse(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
79
80 if cmake_installed_version < version.parse("3.13.0"):
81 print("Could not find a recent CMake to build Horovod. "
82 "Attempting to install CMake 3.13 to a temporary location via pip.", flush=True)
83 cmake_temp_dir = tempfile.TemporaryDirectory(prefix="horovod-cmake-tmp")
84 atexit.register(cmake_temp_dir.cleanup)
85 try:
86 _ = subprocess.check_output(["pip", "install", "--target", cmake_temp_dir.name, "cmake~=3.13.0"])
87 except Exception:
88 raise RuntimeError("Failed to install temporary CMake. "
89 "Please update your CMake to 3.13+ or set HOROVOD_CMAKE appropriately.")
90 cmake_bin = os.path.join(cmake_temp_dir.name, "bin", "run_cmake")
91 with io.open(cmake_bin, "w") as f_run_cmake:
92 f_run_cmake.write(
93 f"#!/bin/sh\nPYTHONPATH={cmake_temp_dir.name} {os.path.join(cmake_temp_dir.name, 'bin', 'cmake')} \"$@\"")
94 os.chmod(cmake_bin, 0o755)
95
96 return cmake_bin
97
98
99 class custom_build_ext(build_ext):
100 def build_extensions(self):
101 if os.getenv('HOROVOD_SKIP_COMPILE') == '1':
102 # Skip building extensions using CMake
103 print("Horovod is being installed without native libraries")
104 return
105
106 cmake_bin = get_cmake_bin()
107
108 config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == "1" else 'RelWithDebInfo'
109
110 ext_name = self.extensions[0].name
111 build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')
112 build_dir = os.path.abspath(build_dir)
113
114 cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,
115 '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),
116 '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]
117
118 make_args = ['-j8'] if not os.environ.get('MAKEFLAGS') else []
119 if self.verbose:
120 make_args.append('VERBOSE=1')
121
122 cmake_build_args = ['--config', config]
123 if make_args:
124 # -- specifies that these args are going to the native build tool: make
125 cmake_build_args += ['--'] + make_args
126
127 cmake_build_dir = os.path.join(self.build_temp, config)
128 if not os.path.exists(cmake_build_dir):
129 os.makedirs(cmake_build_dir)
130
131 config_and_build_commands = [
132 [cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,
133 [cmake_bin, '--build', '.'] + cmake_build_args
134 ]
135
136 if self.verbose:
137 print(f"Running CMake in {cmake_build_dir}:")
138 for command in config_and_build_commands:
139 print(" ".join(command))
140 sys.stdout.flush()
141
142 # Config and build the extension
143 try:
144 for command in config_and_build_commands:
145 subprocess.check_call(command, cwd=cmake_build_dir)
146 except OSError as e:
147 raise RuntimeError('CMake failed: {}'.format(str(e)))
148
149 if sys.argv[1].startswith('develop'):
150 # Copy over metadata.json file from build directory
151 shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),
152 os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))
153 # Remove unfound frameworks, otherwise develop mode will fail the install
154 self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]
155
156
157 # python packages required to use horovod in general
158 require_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<"3.7"', 'packaging']
159
160 # framework dependencies
161 tensorflow_require_list = ['tensorflow']
162 tensorflow_cpu_require_list = ['tensorflow-cpu']
163 tensorflow_gpu_require_list = ['tensorflow-gpu']
164 keras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']
165 # pytorch-lightning 1.3.8 is a stable version to work with horovod
166 pytorch_require_list = ['torch']
167 mxnet_require_list = ['mxnet>=1.4.1']
168 pyspark_require_list = ['pyspark>=2.3.2;python_version<"3.8"',
169 'pyspark>=3.0.0;python_version>="3.8"']
170 spark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0', 'fsspec>=2021.07.0']
171 # https://github.com/ray-project/ray/pull/17465
172 # google-api-core>=2.9.0 depends on protobuf<5.0.0dev,>=3.20.1, which conflicts with
173 # tensorflow protobuf~=3.20 and pytorch-lightning protobuf<3.20,>=3.9.2
174 ray_require_list = ['ray', 'aioredis<2', 'google-api-core<2.9.0']
175 pytorch_spark_require_list = pytorch_require_list + \
176 spark_require_list + \
177 pyspark_require_list + \
178 ['pytorch_lightning>=1.3.8,<1.5.10']
179
180 # all frameworks' dependencies
181 all_frameworks_require_list = tensorflow_require_list + \
182 keras_require_list + \
183 pytorch_require_list + \
184 mxnet_require_list + \
185 spark_require_list + \
186 pyspark_require_list
187
188 # python packages required / recommended to develop horovod
189 # these are the earliest versions to work with Python 3.8
190 # keep in sync with Dockerfile.test.cpu
191 # NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip
192 dev_require_list = ['tensorflow-cpu==2.2.0',
193 'keras==2.3.1',
194 'torch==1.4.0',
195 'torchvision==0.5.0',
196 'pytorch_lightning>=1.3.8,<1.5.10',
197 'mxnet==1.5.0',
198 'pyspark==3.0.1'] + spark_require_list
199 # torchvision 0.5.0 depends on torch==1.4.0
200
201 # python packages required only to run tests
202 test_require_list = ['mock', 'pytest', 'pytest-forked', 'pytest-subtests', 'parameterized']
203
204 # Skip cffi if pytorch extension explicitly disabled
205 if not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):
206 require_list.append('cffi>=1.4.0')
207
208
209 def get_package_version():
210 return __version__ + "+" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__
211
212
213 setup(name='horovod',
214 version=get_package_version(),
215 packages=find_packages(),
216 description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',
217 author='The Horovod Authors',
218 license='Apache 2.0',
219 long_description=textwrap.dedent('''\
220 Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.
221 The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),
222 url='https://github.com/horovod/horovod',
223 keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],
224 classifiers=[
225 'License :: OSI Approved :: Apache Software License',
226 'Development Status :: 4 - Beta',
227 'Intended Audience :: Developers',
228 'Topic :: Scientific/Engineering :: Artificial Intelligence',
229 ],
230 ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],
231 cmdclass={'build_ext': custom_build_ext},
232 # cffi is required for PyTorch
233 # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,
234 # which is undesirable. Luckily, `install` action will install cffi before executing build,
235 # so it's only necessary for `build*` or `bdist*` actions.
236 setup_requires=require_list if is_build_action() else [],
237 install_requires=require_list,
238 tests_require=test_require_list,
239 extras_require={
240 'all-frameworks': all_frameworks_require_list,
241 'tensorflow': tensorflow_require_list,
242 'tensorflow-cpu': tensorflow_cpu_require_list,
243 'tensorflow-gpu': tensorflow_gpu_require_list,
244 'keras': keras_require_list,
245 'pytorch': pytorch_require_list,
246 'mxnet': mxnet_require_list,
247 'spark': spark_require_list + pyspark_require_list,
248 'pytorch-spark': pytorch_spark_require_list,
249 'ray': ray_require_list,
250 'dev': dev_require_list,
251 'test': test_require_list,
252 },
253 python_requires='>=3.6',
254 zip_safe=False,
255 entry_points={
256 'console_scripts': [
257 'horovodrun = horovod.runner.launch:run_commandline'
258 ]
259 })
260
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -167,7 +167,7 @@
mxnet_require_list = ['mxnet>=1.4.1']
pyspark_require_list = ['pyspark>=2.3.2;python_version<"3.8"',
'pyspark>=3.0.0;python_version>="3.8"']
-spark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0', 'fsspec>=2021.07.0']
+spark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0,<11.0', 'fsspec>=2021.07.0']
# https://github.com/ray-project/ray/pull/17465
# google-api-core>=2.9.0 depends on protobuf<5.0.0dev,>=3.20.1, which conflicts with
# tensorflow protobuf~=3.20 and pytorch-lightning protobuf<3.20,>=3.9.2
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -167,7 +167,7 @@\n mxnet_require_list = ['mxnet>=1.4.1']\n pyspark_require_list = ['pyspark>=2.3.2;python_version<\"3.8\"',\n 'pyspark>=3.0.0;python_version>=\"3.8\"']\n-spark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0', 'fsspec>=2021.07.0']\n+spark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0,<11.0', 'fsspec>=2021.07.0']\n # https://github.com/ray-project/ray/pull/17465\n # google-api-core>=2.9.0 depends on protobuf<5.0.0dev,>=3.20.1, which conflicts with\n # tensorflow protobuf~=3.20 and pytorch-lightning protobuf<3.20,>=3.9.2\n", "issue": "Spark tests fail with AttributeError: 'pyarrow.lib.Schema' object has no attribute 'to_arrow_schema'\nSpark tests fail with\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/horovod/examples/spark/pytorch/pytorch_spark_mnist.py\", line 122, in <module>\r\n torch_model = torch_estimator.fit(train_df).setOutputCols(['label_prob'])\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/spark/common/estimator.py\", line 35, in fit\r\n return super(HorovodEstimator, self).fit(df, params)\r\n File \"/usr/local/lib/python3.8/dist-packages/pyspark/ml/base.py\", line 205, in fit\r\n return self._fit(dataset)\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/spark/common/estimator.py\", line 68, in _fit\r\n with util.prepare_data(backend.num_processes(),\r\n File \"/usr/lib/python3.8/contextlib.py\", line 113, in __enter__\r\n return next(self.gen)\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py\", line 735, in prepare_data\r\n dataset_idx = _get_or_create_dataset(key, store, df, feature_columns, label_columns,\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py\", line 672, in _get_or_create_dataset\r\n train_rows, val_rows, pq_metadata, avg_row_size = get_simple_meta_from_parquet(\r\n File \"/usr/local/lib/python3.8/dist-packages/horovod/spark/common/util.py\", line 495, in get_simple_meta_from_parquet\r\n train_data_schema = train_data.schema.to_arrow_schema()\r\nAttributeError: 'pyarrow.lib.Schema' object has no attribute 'to_arrow_schema'\r\n```\r\nhttps://github.com/horovod/horovod/actions/runs/4025636527/jobs/6919132641\r\n\r\nIt's probably related to the 11.0 release of pyarrow. https://pypi.org/project/pyarrow/#history\n", "before_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n# Modifications copyright Microsoft\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport atexit\nimport io\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\n\nfrom horovod import __version__\n\n_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'\n\nclass CMakeExtension(Extension):\n def __init__(self, name, cmake_lists_dir='.', sources=None, **kwa):\n if sources is None:\n sources = []\n Extension.__init__(self, name, sources=sources, **kwa)\n self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)\n\n\ntensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',\n cmake_lists_dir='.', sources=[])\ntorch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',\n cmake_lists_dir='.', sources=[])\nmxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',\n cmake_lists_dir='.', sources=[])\n\ndef is_build_action():\n if len(sys.argv) <= 1:\n return False\n\n if sys.argv[1].startswith('build'):\n return True\n\n if sys.argv[1].startswith('bdist'):\n return True\n\n if sys.argv[1].startswith('install'):\n return True\n\n if sys.argv[1].startswith('develop'):\n return True\n\ndef get_cmake_bin():\n from packaging import version\n\n if 'HOROVOD_CMAKE' in os.environ:\n return os.environ['HOROVOD_CMAKE']\n\n cmake_bin = 'cmake'\n try:\n out = subprocess.check_output([cmake_bin, '--version'])\n except OSError:\n cmake_installed_version = version.parse(\"0.0\")\n else:\n cmake_installed_version = version.parse(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n\n if cmake_installed_version < version.parse(\"3.13.0\"):\n print(\"Could not find a recent CMake to build Horovod. \"\n \"Attempting to install CMake 3.13 to a temporary location via pip.\", flush=True)\n cmake_temp_dir = tempfile.TemporaryDirectory(prefix=\"horovod-cmake-tmp\")\n atexit.register(cmake_temp_dir.cleanup)\n try:\n _ = subprocess.check_output([\"pip\", \"install\", \"--target\", cmake_temp_dir.name, \"cmake~=3.13.0\"])\n except Exception:\n raise RuntimeError(\"Failed to install temporary CMake. 
\"\n \"Please update your CMake to 3.13+ or set HOROVOD_CMAKE appropriately.\")\n cmake_bin = os.path.join(cmake_temp_dir.name, \"bin\", \"run_cmake\")\n with io.open(cmake_bin, \"w\") as f_run_cmake:\n f_run_cmake.write(\n f\"#!/bin/sh\\nPYTHONPATH={cmake_temp_dir.name} {os.path.join(cmake_temp_dir.name, 'bin', 'cmake')} \\\"$@\\\"\")\n os.chmod(cmake_bin, 0o755)\n\n return cmake_bin\n\n\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n if os.getenv('HOROVOD_SKIP_COMPILE') == '1':\n # Skip building extensions using CMake\n print(\"Horovod is being installed without native libraries\")\n return\n\n cmake_bin = get_cmake_bin()\n\n config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == \"1\" else 'RelWithDebInfo'\n\n ext_name = self.extensions[0].name\n build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')\n build_dir = os.path.abspath(build_dir)\n\n cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),\n '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]\n\n make_args = ['-j8'] if not os.environ.get('MAKEFLAGS') else []\n if self.verbose:\n make_args.append('VERBOSE=1')\n\n cmake_build_args = ['--config', config]\n if make_args:\n # -- specifies that these args are going to the native build tool: make\n cmake_build_args += ['--'] + make_args\n\n cmake_build_dir = os.path.join(self.build_temp, config)\n if not os.path.exists(cmake_build_dir):\n os.makedirs(cmake_build_dir)\n\n config_and_build_commands = [\n [cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,\n [cmake_bin, '--build', '.'] + cmake_build_args\n ]\n\n if self.verbose:\n print(f\"Running CMake in {cmake_build_dir}:\")\n for command in config_and_build_commands:\n print(\" \".join(command))\n sys.stdout.flush()\n\n # Config and build the extension\n try:\n for command in config_and_build_commands:\n subprocess.check_call(command, cwd=cmake_build_dir)\n except OSError as e:\n raise RuntimeError('CMake failed: {}'.format(str(e)))\n\n if sys.argv[1].startswith('develop'):\n # Copy over metadata.json file from build directory\n shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),\n os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))\n # Remove unfound frameworks, otherwise develop mode will fail the install\n self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]\n\n\n# python packages required to use horovod in general\nrequire_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<\"3.7\"', 'packaging']\n\n# framework dependencies\ntensorflow_require_list = ['tensorflow']\ntensorflow_cpu_require_list = ['tensorflow-cpu']\ntensorflow_gpu_require_list = ['tensorflow-gpu']\nkeras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']\n# pytorch-lightning 1.3.8 is a stable version to work with horovod\npytorch_require_list = ['torch']\nmxnet_require_list = ['mxnet>=1.4.1']\npyspark_require_list = ['pyspark>=2.3.2;python_version<\"3.8\"',\n 'pyspark>=3.0.0;python_version>=\"3.8\"']\nspark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0', 'fsspec>=2021.07.0']\n# https://github.com/ray-project/ray/pull/17465\n# google-api-core>=2.9.0 depends on protobuf<5.0.0dev,>=3.20.1, which conflicts with\n# tensorflow protobuf~=3.20 and pytorch-lightning protobuf<3.20,>=3.9.2\nray_require_list = ['ray', 'aioredis<2', 'google-api-core<2.9.0']\npytorch_spark_require_list = pytorch_require_list + \\\n 
spark_require_list + \\\n pyspark_require_list + \\\n ['pytorch_lightning>=1.3.8,<1.5.10']\n\n# all frameworks' dependencies\nall_frameworks_require_list = tensorflow_require_list + \\\n keras_require_list + \\\n pytorch_require_list + \\\n mxnet_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# python packages required / recommended to develop horovod\n# these are the earliest versions to work with Python 3.8\n# keep in sync with Dockerfile.test.cpu\n# NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip\ndev_require_list = ['tensorflow-cpu==2.2.0',\n 'keras==2.3.1',\n 'torch==1.4.0',\n 'torchvision==0.5.0',\n 'pytorch_lightning>=1.3.8,<1.5.10',\n 'mxnet==1.5.0',\n 'pyspark==3.0.1'] + spark_require_list\n# torchvision 0.5.0 depends on torch==1.4.0\n\n# python packages required only to run tests\ntest_require_list = ['mock', 'pytest', 'pytest-forked', 'pytest-subtests', 'parameterized']\n\n# Skip cffi if pytorch extension explicitly disabled\nif not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n require_list.append('cffi>=1.4.0')\n\n\ndef get_package_version():\n return __version__ + \"+\" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__\n\n\nsetup(name='horovod',\n version=get_package_version(),\n packages=find_packages(),\n description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',\n author='The Horovod Authors',\n license='Apache 2.0',\n long_description=textwrap.dedent('''\\\n Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.\n The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),\n url='https://github.com/horovod/horovod',\n keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],\n cmdclass={'build_ext': custom_build_ext},\n # cffi is required for PyTorch\n # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,\n # which is undesirable. Luckily, `install` action will install cffi before executing build,\n # so it's only necessary for `build*` or `bdist*` actions.\n setup_requires=require_list if is_build_action() else [],\n install_requires=require_list,\n tests_require=test_require_list,\n extras_require={\n 'all-frameworks': all_frameworks_require_list,\n 'tensorflow': tensorflow_require_list,\n 'tensorflow-cpu': tensorflow_cpu_require_list,\n 'tensorflow-gpu': tensorflow_gpu_require_list,\n 'keras': keras_require_list,\n 'pytorch': pytorch_require_list,\n 'mxnet': mxnet_require_list,\n 'spark': spark_require_list + pyspark_require_list,\n 'pytorch-spark': pytorch_spark_require_list,\n 'ray': ray_require_list,\n 'dev': dev_require_list,\n 'test': test_require_list,\n },\n python_requires='>=3.6',\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'horovodrun = horovod.runner.launch:run_commandline'\n ]\n })\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n# Modifications copyright Microsoft\n# Modifications copyright (C) 2020, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport atexit\nimport io\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport textwrap\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext\n\nfrom horovod import __version__\n\n_FRAMEWORK_METADATA_FILE = 'horovod/metadata.json'\n\nclass CMakeExtension(Extension):\n def __init__(self, name, cmake_lists_dir='.', sources=None, **kwa):\n if sources is None:\n sources = []\n Extension.__init__(self, name, sources=sources, **kwa)\n self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)\n\n\ntensorflow_mpi_lib = CMakeExtension('horovod.tensorflow.mpi_lib',\n cmake_lists_dir='.', sources=[])\ntorch_mpi_lib_v2 = CMakeExtension('horovod.torch.mpi_lib_v2',\n cmake_lists_dir='.', sources=[])\nmxnet_mpi_lib = CMakeExtension('horovod.mxnet.mpi_lib',\n cmake_lists_dir='.', sources=[])\n\ndef is_build_action():\n if len(sys.argv) <= 1:\n return False\n\n if sys.argv[1].startswith('build'):\n return True\n\n if sys.argv[1].startswith('bdist'):\n return True\n\n if sys.argv[1].startswith('install'):\n return True\n\n if sys.argv[1].startswith('develop'):\n return True\n\ndef get_cmake_bin():\n from packaging import version\n\n if 'HOROVOD_CMAKE' in os.environ:\n return os.environ['HOROVOD_CMAKE']\n\n cmake_bin = 'cmake'\n try:\n out = subprocess.check_output([cmake_bin, '--version'])\n except OSError:\n cmake_installed_version = version.parse(\"0.0\")\n else:\n cmake_installed_version = version.parse(re.search(r'version\\s*([\\d.]+)', out.decode()).group(1))\n\n if cmake_installed_version < version.parse(\"3.13.0\"):\n print(\"Could not find a recent CMake to build Horovod. \"\n \"Attempting to install CMake 3.13 to a temporary location via pip.\", flush=True)\n cmake_temp_dir = tempfile.TemporaryDirectory(prefix=\"horovod-cmake-tmp\")\n atexit.register(cmake_temp_dir.cleanup)\n try:\n _ = subprocess.check_output([\"pip\", \"install\", \"--target\", cmake_temp_dir.name, \"cmake~=3.13.0\"])\n except Exception:\n raise RuntimeError(\"Failed to install temporary CMake. 
\"\n \"Please update your CMake to 3.13+ or set HOROVOD_CMAKE appropriately.\")\n cmake_bin = os.path.join(cmake_temp_dir.name, \"bin\", \"run_cmake\")\n with io.open(cmake_bin, \"w\") as f_run_cmake:\n f_run_cmake.write(\n f\"#!/bin/sh\\nPYTHONPATH={cmake_temp_dir.name} {os.path.join(cmake_temp_dir.name, 'bin', 'cmake')} \\\"$@\\\"\")\n os.chmod(cmake_bin, 0o755)\n\n return cmake_bin\n\n\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n if os.getenv('HOROVOD_SKIP_COMPILE') == '1':\n # Skip building extensions using CMake\n print(\"Horovod is being installed without native libraries\")\n return\n\n cmake_bin = get_cmake_bin()\n\n config = 'Debug' if self.debug or os.environ.get('HOROVOD_DEBUG') == \"1\" else 'RelWithDebInfo'\n\n ext_name = self.extensions[0].name\n build_dir = self.get_ext_fullpath(ext_name).replace(self.get_ext_filename(ext_name), '')\n build_dir = os.path.abspath(build_dir)\n\n cmake_args = ['-DCMAKE_BUILD_TYPE=' + config,\n '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), build_dir),\n '-DPYTHON_EXECUTABLE:FILEPATH=' + sys.executable]\n\n make_args = ['-j8'] if not os.environ.get('MAKEFLAGS') else []\n if self.verbose:\n make_args.append('VERBOSE=1')\n\n cmake_build_args = ['--config', config]\n if make_args:\n # -- specifies that these args are going to the native build tool: make\n cmake_build_args += ['--'] + make_args\n\n cmake_build_dir = os.path.join(self.build_temp, config)\n if not os.path.exists(cmake_build_dir):\n os.makedirs(cmake_build_dir)\n\n config_and_build_commands = [\n [cmake_bin, self.extensions[0].cmake_lists_dir] + cmake_args,\n [cmake_bin, '--build', '.'] + cmake_build_args\n ]\n\n if self.verbose:\n print(f\"Running CMake in {cmake_build_dir}:\")\n for command in config_and_build_commands:\n print(\" \".join(command))\n sys.stdout.flush()\n\n # Config and build the extension\n try:\n for command in config_and_build_commands:\n subprocess.check_call(command, cwd=cmake_build_dir)\n except OSError as e:\n raise RuntimeError('CMake failed: {}'.format(str(e)))\n\n if sys.argv[1].startswith('develop'):\n # Copy over metadata.json file from build directory\n shutil.copyfile(os.path.join(build_dir, _FRAMEWORK_METADATA_FILE),\n os.path.join(self.extensions[0].cmake_lists_dir, _FRAMEWORK_METADATA_FILE))\n # Remove unfound frameworks, otherwise develop mode will fail the install\n self.extensions = [x for x in self.extensions if os.path.exists(self.get_ext_fullpath(x.name))]\n\n\n# python packages required to use horovod in general\nrequire_list = ['cloudpickle', 'psutil', 'pyyaml', 'dataclasses;python_version<\"3.7\"', 'packaging']\n\n# framework dependencies\ntensorflow_require_list = ['tensorflow']\ntensorflow_cpu_require_list = ['tensorflow-cpu']\ntensorflow_gpu_require_list = ['tensorflow-gpu']\nkeras_require_list = ['keras>=2.0.8,!=2.0.9,!=2.1.0,!=2.1.1']\n# pytorch-lightning 1.3.8 is a stable version to work with horovod\npytorch_require_list = ['torch']\nmxnet_require_list = ['mxnet>=1.4.1']\npyspark_require_list = ['pyspark>=2.3.2;python_version<\"3.8\"',\n 'pyspark>=3.0.0;python_version>=\"3.8\"']\nspark_require_list = ['numpy', 'petastorm>=0.12.0', 'pyarrow>=0.15.0,<11.0', 'fsspec>=2021.07.0']\n# https://github.com/ray-project/ray/pull/17465\n# google-api-core>=2.9.0 depends on protobuf<5.0.0dev,>=3.20.1, which conflicts with\n# tensorflow protobuf~=3.20 and pytorch-lightning protobuf<3.20,>=3.9.2\nray_require_list = ['ray', 'aioredis<2', 'google-api-core<2.9.0']\npytorch_spark_require_list = pytorch_require_list 
+ \\\n spark_require_list + \\\n pyspark_require_list + \\\n ['pytorch_lightning>=1.3.8,<1.5.10']\n\n# all frameworks' dependencies\nall_frameworks_require_list = tensorflow_require_list + \\\n keras_require_list + \\\n pytorch_require_list + \\\n mxnet_require_list + \\\n spark_require_list + \\\n pyspark_require_list\n\n# python packages required / recommended to develop horovod\n# these are the earliest versions to work with Python 3.8\n# keep in sync with Dockerfile.test.cpu\n# NOTE: do not use versions with +cpu or +gpu here as users would need to add --find-links to pip\ndev_require_list = ['tensorflow-cpu==2.2.0',\n 'keras==2.3.1',\n 'torch==1.4.0',\n 'torchvision==0.5.0',\n 'pytorch_lightning>=1.3.8,<1.5.10',\n 'mxnet==1.5.0',\n 'pyspark==3.0.1'] + spark_require_list\n# torchvision 0.5.0 depends on torch==1.4.0\n\n# python packages required only to run tests\ntest_require_list = ['mock', 'pytest', 'pytest-forked', 'pytest-subtests', 'parameterized']\n\n# Skip cffi if pytorch extension explicitly disabled\nif not os.environ.get('HOROVOD_WITHOUT_PYTORCH'):\n require_list.append('cffi>=1.4.0')\n\n\ndef get_package_version():\n return __version__ + \"+\" + os.environ['HOROVOD_LOCAL_VERSION'] if 'HOROVOD_LOCAL_VERSION' in os.environ else __version__\n\n\nsetup(name='horovod',\n version=get_package_version(),\n packages=find_packages(),\n description='Distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.',\n author='The Horovod Authors',\n license='Apache 2.0',\n long_description=textwrap.dedent('''\\\n Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and Apache MXNet.\n The goal of Horovod is to make distributed Deep Learning fast and easy to use.'''),\n url='https://github.com/horovod/horovod',\n keywords=['deep learning', 'tensorflow', 'keras', 'pytorch', 'mxnet', 'spark', 'AI'],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n ext_modules=[tensorflow_mpi_lib, torch_mpi_lib_v2, mxnet_mpi_lib],\n cmdclass={'build_ext': custom_build_ext},\n # cffi is required for PyTorch\n # If cffi is specified in setup_requires, it will need libffi to be installed on the machine,\n # which is undesirable. Luckily, `install` action will install cffi before executing build,\n # so it's only necessary for `build*` or `bdist*` actions.\n setup_requires=require_list if is_build_action() else [],\n install_requires=require_list,\n tests_require=test_require_list,\n extras_require={\n 'all-frameworks': all_frameworks_require_list,\n 'tensorflow': tensorflow_require_list,\n 'tensorflow-cpu': tensorflow_cpu_require_list,\n 'tensorflow-gpu': tensorflow_gpu_require_list,\n 'keras': keras_require_list,\n 'pytorch': pytorch_require_list,\n 'mxnet': mxnet_require_list,\n 'spark': spark_require_list + pyspark_require_list,\n 'pytorch-spark': pytorch_spark_require_list,\n 'ray': ray_require_list,\n 'dev': dev_require_list,\n 'test': test_require_list,\n },\n python_requires='>=3.6',\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'horovodrun = horovod.runner.launch:run_commandline'\n ]\n })\n", "path": "setup.py"}]} | 4,050 | 270 |
gh_patches_debug_36049 | rasdani/github-patches | git_diff | mozilla__pontoon-2716 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pretranslated Fluent string has the ID translated or modified
It happened for at least two strings.
```
remember-pw-link = Remember your password? Sign in
```
Became
```
Remember-pw-link = Ricordi la password? Accedi
```
No clue why it changed to uppercase.
On the other hand, for
```
plan-price-interval-year =
{ $intervalCount ->
[one] { $amount } all’anno
*[other] { $amount } ogni { $intervalCount } anni
}
.title =
{ $intervalCount ->
[one] { $amount } all’anno
*[other] { $amount } ogni { $intervalCount } anni
}
```
The id was translated to `piano-prezzo-intervallo-anno` (but the translation was good besides that).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/pretranslation/pretranslate.py`
Content:
```
1 import operator
2
3 from fluent.syntax import FluentSerializer
4 from functools import reduce
5
6 from django.db.models import CharField, Value as V
7 from django.db.models.functions import Concat
8
9 from pontoon.base.models import User, TranslatedResource
10 from pontoon.machinery.utils import (
11 get_google_translate_data,
12 get_translation_memory_data,
13 )
14
15 from pontoon.base.templatetags.helpers import (
16 as_simple_translation,
17 is_single_input_ftl_string,
18 get_reconstructed_message,
19 )
20
21
22 serializer = FluentSerializer()
23
24
25 def get_translations(entity, locale):
26 """
27 Get pretranslations for the entity-locale pair
28
29 :arg Entity entity: the Entity object
30 :arg Locale locale: the Locale object
31
32 :returns: a list of tuple with:
33 - a pretranslation of the entity
34 - plural form
35 - user - tm_user/gt_user
36 """
37 tm_user = User.objects.get(email="[email protected]")
38 gt_user = User.objects.get(email="[email protected]")
39
40 strings = []
41 plural_forms = range(0, locale.nplurals or 1)
42
43 entity_string = (
44 as_simple_translation(entity.string)
45 if is_single_input_ftl_string(entity.string)
46 else entity.string
47 )
48
49 # Try to get matches from translation_memory
50 tm_response = get_translation_memory_data(
51 text=entity_string,
52 locale=locale,
53 )
54
55 tm_response = [t for t in tm_response if int(t["quality"]) == 100]
56
57 if tm_response:
58 if entity.string_plural == "":
59 translation = tm_response[0]["target"]
60
61 if entity.string != entity_string:
62 translation = serializer.serialize_entry(
63 get_reconstructed_message(entity.string, translation)
64 )
65
66 strings = [(translation, None, tm_user)]
67 else:
68 for plural_form in plural_forms:
69 strings.append((tm_response[0]["target"], plural_form, tm_user))
70
71 # Else fetch from google translate
72 elif locale.google_translate_code:
73 gt_response = get_google_translate_data(
74 text=entity.string,
75 locale=locale,
76 )
77
78 if gt_response["status"]:
79 if entity.string_plural == "":
80 strings = [(gt_response["translation"], None, gt_user)]
81 else:
82 for plural_form in plural_forms:
83 strings.append((gt_response["translation"], plural_form, gt_user))
84 return strings
85
86
87 def update_changed_instances(tr_filter, tr_dict, translations):
88 """
89 Update the latest activity and stats for changed Locales, ProjectLocales
90 & TranslatedResources
91 """
92 tr_filter = tuple(tr_filter)
93 # Combine all generated filters with an OK operator.
94 # `operator.ior` is the '|' Python operator, which turns into a logical OR
95 # when used between django ORM query objects.
96 tr_query = reduce(operator.ior, tr_filter)
97
98 translatedresources = TranslatedResource.objects.filter(tr_query).annotate(
99 locale_resource=Concat(
100 "locale_id", V("-"), "resource_id", output_field=CharField()
101 )
102 )
103
104 translatedresources.update_stats()
105
106 for tr in translatedresources:
107 index = tr_dict[tr.locale_resource]
108 translation = translations[index]
109 translation.update_latest_translation()
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py
--- a/pontoon/pretranslation/pretranslate.py
+++ b/pontoon/pretranslation/pretranslate.py
@@ -18,6 +18,7 @@
get_reconstructed_message,
)
+UNTRANSLATABLE_KEY = "AIzaSyDX3R5Y1kxh_8lJ4OAO"
serializer = FluentSerializer()
@@ -40,7 +41,7 @@
strings = []
plural_forms = range(0, locale.nplurals or 1)
- entity_string = (
+ tm_input = (
as_simple_translation(entity.string)
if is_single_input_ftl_string(entity.string)
else entity.string
@@ -48,7 +49,7 @@
# Try to get matches from translation_memory
tm_response = get_translation_memory_data(
- text=entity_string,
+ text=tm_input,
locale=locale,
)
@@ -58,7 +59,7 @@
if entity.string_plural == "":
translation = tm_response[0]["target"]
- if entity.string != entity_string:
+ if entity.string != tm_input:
translation = serializer.serialize_entry(
get_reconstructed_message(entity.string, translation)
)
@@ -70,12 +71,23 @@
# Else fetch from google translate
elif locale.google_translate_code:
+ gt_input = (
+ entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)
+ if entity.resource.format == "ftl"
+ else entity.string
+ )
+
gt_response = get_google_translate_data(
- text=entity.string,
+ text=gt_input,
locale=locale,
)
if gt_response["status"]:
+ if entity.string != gt_input:
+ gt_response["translation"] = gt_response["translation"].replace(
+ UNTRANSLATABLE_KEY, entity.key
+ )
+
if entity.string_plural == "":
strings = [(gt_response["translation"], None, gt_user)]
else:
| {"golden_diff": "diff --git a/pontoon/pretranslation/pretranslate.py b/pontoon/pretranslation/pretranslate.py\n--- a/pontoon/pretranslation/pretranslate.py\n+++ b/pontoon/pretranslation/pretranslate.py\n@@ -18,6 +18,7 @@\n get_reconstructed_message,\n )\n \n+UNTRANSLATABLE_KEY = \"AIzaSyDX3R5Y1kxh_8lJ4OAO\"\n \n serializer = FluentSerializer()\n \n@@ -40,7 +41,7 @@\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n \n- entity_string = (\n+ tm_input = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n@@ -48,7 +49,7 @@\n \n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n- text=entity_string,\n+ text=tm_input,\n locale=locale,\n )\n \n@@ -58,7 +59,7 @@\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n \n- if entity.string != entity_string:\n+ if entity.string != tm_input:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n@@ -70,12 +71,23 @@\n \n # Else fetch from google translate\n elif locale.google_translate_code:\n+ gt_input = (\n+ entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)\n+ if entity.resource.format == \"ftl\"\n+ else entity.string\n+ )\n+\n gt_response = get_google_translate_data(\n- text=entity.string,\n+ text=gt_input,\n locale=locale,\n )\n \n if gt_response[\"status\"]:\n+ if entity.string != gt_input:\n+ gt_response[\"translation\"] = gt_response[\"translation\"].replace(\n+ UNTRANSLATABLE_KEY, entity.key\n+ )\n+\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n", "issue": "Pretranslated Fluent string has the ID translated or modified\nIt happened for at least two strings.\r\n\r\n```\r\nremember-pw-link = Remember your password? Sign in\r\n```\r\n\r\nBecame\r\n\r\n```\r\nRemember-pw-link = Ricordi la password? 
Accedi\r\n```\r\n\r\nNo clue why it changed to uppercase.\r\n\r\nOn the other hand, for \r\n\r\n```\r\nplan-price-interval-year =\r\n { $intervalCount ->\r\n [one] { $amount } all\u2019anno\r\n *[other] { $amount } ogni { $intervalCount } anni\r\n }\r\n .title =\r\n { $intervalCount ->\r\n [one] { $amount } all\u2019anno\r\n *[other] { $amount } ogni { $intervalCount } anni\r\n }\r\n```\r\n\r\nThe id was translated to `piano-prezzo-intervallo-anno`(but the translation was good besides that).\r\n\n", "before_files": [{"content": "import operator\n\nfrom fluent.syntax import FluentSerializer\nfrom functools import reduce\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\nfrom pontoon.base.templatetags.helpers import (\n as_simple_translation,\n is_single_input_ftl_string,\n get_reconstructed_message,\n)\n\n\nserializer = FluentSerializer()\n\n\ndef get_translations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuple with:\n - a pretranslation of the entity\n - plural form\n - user - tm_user/gt_user\n \"\"\"\n tm_user = User.objects.get(email=\"[email protected]\")\n gt_user = User.objects.get(email=\"[email protected]\")\n\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n\n entity_string = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n )\n\n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n text=entity_string,\n locale=locale,\n )\n\n tm_response = [t for t in tm_response if int(t[\"quality\"]) == 100]\n\n if tm_response:\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n\n if entity.string != entity_string:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n\n strings = [(translation, None, tm_user)]\n else:\n for plural_form in plural_forms:\n strings.append((tm_response[0][\"target\"], plural_form, tm_user))\n\n # Else fetch from google translate\n elif locale.google_translate_code:\n gt_response = get_google_translate_data(\n text=entity.string,\n locale=locale,\n )\n\n if gt_response[\"status\"]:\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n for plural_form in plural_forms:\n strings.append((gt_response[\"translation\"], plural_form, gt_user))\n return strings\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": 
"pontoon/pretranslation/pretranslate.py"}], "after_files": [{"content": "import operator\n\nfrom fluent.syntax import FluentSerializer\nfrom functools import reduce\n\nfrom django.db.models import CharField, Value as V\nfrom django.db.models.functions import Concat\n\nfrom pontoon.base.models import User, TranslatedResource\nfrom pontoon.machinery.utils import (\n get_google_translate_data,\n get_translation_memory_data,\n)\n\nfrom pontoon.base.templatetags.helpers import (\n as_simple_translation,\n is_single_input_ftl_string,\n get_reconstructed_message,\n)\n\nUNTRANSLATABLE_KEY = \"AIzaSyDX3R5Y1kxh_8lJ4OAO\"\n\nserializer = FluentSerializer()\n\n\ndef get_translations(entity, locale):\n \"\"\"\n Get pretranslations for the entity-locale pair\n\n :arg Entity entity: the Entity object\n :arg Locale locale: the Locale object\n\n :returns: a list of tuple with:\n - a pretranslation of the entity\n - plural form\n - user - tm_user/gt_user\n \"\"\"\n tm_user = User.objects.get(email=\"[email protected]\")\n gt_user = User.objects.get(email=\"[email protected]\")\n\n strings = []\n plural_forms = range(0, locale.nplurals or 1)\n\n tm_input = (\n as_simple_translation(entity.string)\n if is_single_input_ftl_string(entity.string)\n else entity.string\n )\n\n # Try to get matches from translation_memory\n tm_response = get_translation_memory_data(\n text=tm_input,\n locale=locale,\n )\n\n tm_response = [t for t in tm_response if int(t[\"quality\"]) == 100]\n\n if tm_response:\n if entity.string_plural == \"\":\n translation = tm_response[0][\"target\"]\n\n if entity.string != tm_input:\n translation = serializer.serialize_entry(\n get_reconstructed_message(entity.string, translation)\n )\n\n strings = [(translation, None, tm_user)]\n else:\n for plural_form in plural_forms:\n strings.append((tm_response[0][\"target\"], plural_form, tm_user))\n\n # Else fetch from google translate\n elif locale.google_translate_code:\n gt_input = (\n entity.string.replace(entity.key, UNTRANSLATABLE_KEY, 1)\n if entity.resource.format == \"ftl\"\n else entity.string\n )\n\n gt_response = get_google_translate_data(\n text=gt_input,\n locale=locale,\n )\n\n if gt_response[\"status\"]:\n if entity.string != gt_input:\n gt_response[\"translation\"] = gt_response[\"translation\"].replace(\n UNTRANSLATABLE_KEY, entity.key\n )\n\n if entity.string_plural == \"\":\n strings = [(gt_response[\"translation\"], None, gt_user)]\n else:\n for plural_form in plural_forms:\n strings.append((gt_response[\"translation\"], plural_form, gt_user))\n return strings\n\n\ndef update_changed_instances(tr_filter, tr_dict, translations):\n \"\"\"\n Update the latest activity and stats for changed Locales, ProjectLocales\n & TranslatedResources\n \"\"\"\n tr_filter = tuple(tr_filter)\n # Combine all generated filters with an OK operator.\n # `operator.ior` is the '|' Python operator, which turns into a logical OR\n # when used between django ORM query objects.\n tr_query = reduce(operator.ior, tr_filter)\n\n translatedresources = TranslatedResource.objects.filter(tr_query).annotate(\n locale_resource=Concat(\n \"locale_id\", V(\"-\"), \"resource_id\", output_field=CharField()\n )\n )\n\n translatedresources.update_stats()\n\n for tr in translatedresources:\n index = tr_dict[tr.locale_resource]\n translation = translations[index]\n translation.update_latest_translation()\n", "path": "pontoon/pretranslation/pretranslate.py"}]} | 1,374 | 467 |
gh_patches_debug_21595 | rasdani/github-patches | git_diff | matrix-org__synapse-3927 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
run_as_background_process doesn't catch & log exceptions
(plus if it does raise in a loopingcall, we throw away the exception)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/metrics/background_process_metrics.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2018 New Vector Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import threading
17
18 import six
19
20 from prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily
21
22 from twisted.internet import defer
23
24 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
25
26 _background_process_start_count = Counter(
27 "synapse_background_process_start_count",
28 "Number of background processes started",
29 ["name"],
30 )
31
32 # we set registry=None in all of these to stop them getting registered with
33 # the default registry. Instead we collect them all via the CustomCollector,
34 # which ensures that we can update them before they are collected.
35 #
36 _background_process_ru_utime = Counter(
37 "synapse_background_process_ru_utime_seconds",
38 "User CPU time used by background processes, in seconds",
39 ["name"],
40 registry=None,
41 )
42
43 _background_process_ru_stime = Counter(
44 "synapse_background_process_ru_stime_seconds",
45 "System CPU time used by background processes, in seconds",
46 ["name"],
47 registry=None,
48 )
49
50 _background_process_db_txn_count = Counter(
51 "synapse_background_process_db_txn_count",
52 "Number of database transactions done by background processes",
53 ["name"],
54 registry=None,
55 )
56
57 _background_process_db_txn_duration = Counter(
58 "synapse_background_process_db_txn_duration_seconds",
59 ("Seconds spent by background processes waiting for database "
60 "transactions, excluding scheduling time"),
61 ["name"],
62 registry=None,
63 )
64
65 _background_process_db_sched_duration = Counter(
66 "synapse_background_process_db_sched_duration_seconds",
67 "Seconds spent by background processes waiting for database connections",
68 ["name"],
69 registry=None,
70 )
71
72 # map from description to a counter, so that we can name our logcontexts
73 # incrementally. (It actually duplicates _background_process_start_count, but
74 # it's much simpler to do so than to try to combine them.)
75 _background_process_counts = dict() # type: dict[str, int]
76
77 # map from description to the currently running background processes.
78 #
79 # it's kept as a dict of sets rather than a big set so that we can keep track
80 # of process descriptions that no longer have any active processes.
81 _background_processes = dict() # type: dict[str, set[_BackgroundProcess]]
82
83 # A lock that covers the above dicts
84 _bg_metrics_lock = threading.Lock()
85
86
87 class _Collector(object):
88 """A custom metrics collector for the background process metrics.
89
90 Ensures that all of the metrics are up-to-date with any in-flight processes
91 before they are returned.
92 """
93 def collect(self):
94 background_process_in_flight_count = GaugeMetricFamily(
95 "synapse_background_process_in_flight_count",
96 "Number of background processes in flight",
97 labels=["name"],
98 )
99
100 # We copy the dict so that it doesn't change from underneath us
101 with _bg_metrics_lock:
102 _background_processes_copy = dict(_background_processes)
103
104 for desc, processes in six.iteritems(_background_processes_copy):
105 background_process_in_flight_count.add_metric(
106 (desc,), len(processes),
107 )
108 for process in processes:
109 process.update_metrics()
110
111 yield background_process_in_flight_count
112
113 # now we need to run collect() over each of the static Counters, and
114 # yield each metric they return.
115 for m in (
116 _background_process_ru_utime,
117 _background_process_ru_stime,
118 _background_process_db_txn_count,
119 _background_process_db_txn_duration,
120 _background_process_db_sched_duration,
121 ):
122 for r in m.collect():
123 yield r
124
125
126 REGISTRY.register(_Collector())
127
128
129 class _BackgroundProcess(object):
130 def __init__(self, desc, ctx):
131 self.desc = desc
132 self._context = ctx
133 self._reported_stats = None
134
135 def update_metrics(self):
136 """Updates the metrics with values from this process."""
137 new_stats = self._context.get_resource_usage()
138 if self._reported_stats is None:
139 diff = new_stats
140 else:
141 diff = new_stats - self._reported_stats
142 self._reported_stats = new_stats
143
144 _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)
145 _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)
146 _background_process_db_txn_count.labels(self.desc).inc(
147 diff.db_txn_count,
148 )
149 _background_process_db_txn_duration.labels(self.desc).inc(
150 diff.db_txn_duration_sec,
151 )
152 _background_process_db_sched_duration.labels(self.desc).inc(
153 diff.db_sched_duration_sec,
154 )
155
156
157 def run_as_background_process(desc, func, *args, **kwargs):
158 """Run the given function in its own logcontext, with resource metrics
159
160 This should be used to wrap processes which are fired off to run in the
161 background, instead of being associated with a particular request.
162
163 It returns a Deferred which completes when the function completes, but it doesn't
164 follow the synapse logcontext rules, which makes it appropriate for passing to
165 clock.looping_call and friends (or for firing-and-forgetting in the middle of a
166 normal synapse inlineCallbacks function).
167
168 Args:
169 desc (str): a description for this background process type
170 func: a function, which may return a Deferred
171 args: positional args for func
172 kwargs: keyword args for func
173
174 Returns: Deferred which returns the result of func, but note that it does not
175 follow the synapse logcontext rules.
176 """
177 @defer.inlineCallbacks
178 def run():
179 with _bg_metrics_lock:
180 count = _background_process_counts.get(desc, 0)
181 _background_process_counts[desc] = count + 1
182
183 _background_process_start_count.labels(desc).inc()
184
185 with LoggingContext(desc) as context:
186 context.request = "%s-%i" % (desc, count)
187 proc = _BackgroundProcess(desc, context)
188
189 with _bg_metrics_lock:
190 _background_processes.setdefault(desc, set()).add(proc)
191
192 try:
193 yield func(*args, **kwargs)
194 finally:
195 proc.update_metrics()
196
197 with _bg_metrics_lock:
198 _background_processes[desc].remove(proc)
199
200 with PreserveLoggingContext():
201 return run()
202
```
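The docstring above frames `run_as_background_process` as a fire-and-forget wrapper. Below is a minimal, hypothetical sketch of that call pattern; the task function, its argument, and the metric label are invented for illustration, and the snippet assumes a Synapse checkout (and Twisted) is importable:

```python
# Sketch only: any callable (optionally returning a Deferred) can be wrapped.
from twisted.internet import defer

from synapse.metrics.background_process_metrics import run_as_background_process


@defer.inlineCallbacks
def prune_expired_entries(cache):
    # Stand-in for real asynchronous clean-up work.
    yield defer.succeed(cache.clear())


# Fire and forget: resource usage is recorded under the "prune_expired_entries"
# label, and the returned Deferred deliberately ignores logcontext rules.
run_as_background_process("prune_expired_entries", prune_expired_entries, {})
```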
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py
--- a/synapse/metrics/background_process_metrics.py
+++ b/synapse/metrics/background_process_metrics.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import threading
 
 import six
@@ -23,6 +24,9 @@
 
 from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
 
+logger = logging.getLogger(__name__)
+
+
 _background_process_start_count = Counter(
     "synapse_background_process_start_count",
     "Number of background processes started",
@@ -191,6 +195,8 @@
 
             try:
                 yield func(*args, **kwargs)
+            except Exception:
+                logger.exception("Background process '%s' threw an exception", desc)
             finally:
                 proc.update_metrics()
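The added `except` clause means a failing task now leaves a traceback in the logs rather than vanishing. A hypothetical snippet, assuming the patch above is applied and standard logging is configured:

```python
# Before the patch this RuntimeError was thrown away; afterwards
# logger.exception() records "Background process 'demo' threw an exception"
# together with the traceback.
import logging

from synapse.metrics.background_process_metrics import run_as_background_process

logging.basicConfig(level=logging.ERROR)


def fail_immediately():
    raise RuntimeError("boom")


run_as_background_process("demo", fail_immediately)
```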
| {"golden_diff": "diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py\n--- a/synapse/metrics/background_process_metrics.py\n+++ b/synapse/metrics/background_process_metrics.py\n@@ -13,6 +13,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import logging\n import threading\n \n import six\n@@ -23,6 +24,9 @@\n \n from synapse.util.logcontext import LoggingContext, PreserveLoggingContext\n \n+logger = logging.getLogger(__name__)\n+\n+\n _background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n@@ -191,6 +195,8 @@\n \n try:\n yield func(*args, **kwargs)\n+ except Exception:\n+ logger.exception(\"Background process '%s' threw an exception\", desc)\n finally:\n proc.update_metrics()\n", "issue": "run_as_background_process doesn't catch & log exceptions \n(plus if it does raise in a loopingcall, we throw away the exception)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\n\nimport six\n\nfrom prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily\n\nfrom twisted.internet import defer\n\nfrom synapse.util.logcontext import LoggingContext, PreserveLoggingContext\n\n_background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n [\"name\"],\n)\n\n# we set registry=None in all of these to stop them getting registered with\n# the default registry. Instead we collect them all via the CustomCollector,\n# which ensures that we can update them before they are collected.\n#\n_background_process_ru_utime = Counter(\n \"synapse_background_process_ru_utime_seconds\",\n \"User CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_ru_stime = Counter(\n \"synapse_background_process_ru_stime_seconds\",\n \"System CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_count = Counter(\n \"synapse_background_process_db_txn_count\",\n \"Number of database transactions done by background processes\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_duration = Counter(\n \"synapse_background_process_db_txn_duration_seconds\",\n (\"Seconds spent by background processes waiting for database \"\n \"transactions, excluding scheduling time\"),\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_sched_duration = Counter(\n \"synapse_background_process_db_sched_duration_seconds\",\n \"Seconds spent by background processes waiting for database connections\",\n [\"name\"],\n registry=None,\n)\n\n# map from description to a counter, so that we can name our logcontexts\n# incrementally. 
(It actually duplicates _background_process_start_count, but\n# it's much simpler to do so than to try to combine them.)\n_background_process_counts = dict() # type: dict[str, int]\n\n# map from description to the currently running background processes.\n#\n# it's kept as a dict of sets rather than a big set so that we can keep track\n# of process descriptions that no longer have any active processes.\n_background_processes = dict() # type: dict[str, set[_BackgroundProcess]]\n\n# A lock that covers the above dicts\n_bg_metrics_lock = threading.Lock()\n\n\nclass _Collector(object):\n \"\"\"A custom metrics collector for the background process metrics.\n\n Ensures that all of the metrics are up-to-date with any in-flight processes\n before they are returned.\n \"\"\"\n def collect(self):\n background_process_in_flight_count = GaugeMetricFamily(\n \"synapse_background_process_in_flight_count\",\n \"Number of background processes in flight\",\n labels=[\"name\"],\n )\n\n # We copy the dict so that it doesn't change from underneath us\n with _bg_metrics_lock:\n _background_processes_copy = dict(_background_processes)\n\n for desc, processes in six.iteritems(_background_processes_copy):\n background_process_in_flight_count.add_metric(\n (desc,), len(processes),\n )\n for process in processes:\n process.update_metrics()\n\n yield background_process_in_flight_count\n\n # now we need to run collect() over each of the static Counters, and\n # yield each metric they return.\n for m in (\n _background_process_ru_utime,\n _background_process_ru_stime,\n _background_process_db_txn_count,\n _background_process_db_txn_duration,\n _background_process_db_sched_duration,\n ):\n for r in m.collect():\n yield r\n\n\nREGISTRY.register(_Collector())\n\n\nclass _BackgroundProcess(object):\n def __init__(self, desc, ctx):\n self.desc = desc\n self._context = ctx\n self._reported_stats = None\n\n def update_metrics(self):\n \"\"\"Updates the metrics with values from this process.\"\"\"\n new_stats = self._context.get_resource_usage()\n if self._reported_stats is None:\n diff = new_stats\n else:\n diff = new_stats - self._reported_stats\n self._reported_stats = new_stats\n\n _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)\n _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)\n _background_process_db_txn_count.labels(self.desc).inc(\n diff.db_txn_count,\n )\n _background_process_db_txn_duration.labels(self.desc).inc(\n diff.db_txn_duration_sec,\n )\n _background_process_db_sched_duration.labels(self.desc).inc(\n diff.db_sched_duration_sec,\n )\n\n\ndef run_as_background_process(desc, func, *args, **kwargs):\n \"\"\"Run the given function in its own logcontext, with resource metrics\n\n This should be used to wrap processes which are fired off to run in the\n background, instead of being associated with a particular request.\n\n It returns a Deferred which completes when the function completes, but it doesn't\n follow the synapse logcontext rules, which makes it appropriate for passing to\n clock.looping_call and friends (or for firing-and-forgetting in the middle of a\n normal synapse inlineCallbacks function).\n\n Args:\n desc (str): a description for this background process type\n func: a function, which may return a Deferred\n args: positional args for func\n kwargs: keyword args for func\n\n Returns: Deferred which returns the result of func, but note that it does not\n follow the synapse logcontext rules.\n \"\"\"\n @defer.inlineCallbacks\n def run():\n with _bg_metrics_lock:\n 
count = _background_process_counts.get(desc, 0)\n _background_process_counts[desc] = count + 1\n\n _background_process_start_count.labels(desc).inc()\n\n with LoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n proc = _BackgroundProcess(desc, context)\n\n with _bg_metrics_lock:\n _background_processes.setdefault(desc, set()).add(proc)\n\n try:\n yield func(*args, **kwargs)\n finally:\n proc.update_metrics()\n\n with _bg_metrics_lock:\n _background_processes[desc].remove(proc)\n\n with PreserveLoggingContext():\n return run()\n", "path": "synapse/metrics/background_process_metrics.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport threading\n\nimport six\n\nfrom prometheus_client.core import REGISTRY, Counter, GaugeMetricFamily\n\nfrom twisted.internet import defer\n\nfrom synapse.util.logcontext import LoggingContext, PreserveLoggingContext\n\nlogger = logging.getLogger(__name__)\n\n\n_background_process_start_count = Counter(\n \"synapse_background_process_start_count\",\n \"Number of background processes started\",\n [\"name\"],\n)\n\n# we set registry=None in all of these to stop them getting registered with\n# the default registry. Instead we collect them all via the CustomCollector,\n# which ensures that we can update them before they are collected.\n#\n_background_process_ru_utime = Counter(\n \"synapse_background_process_ru_utime_seconds\",\n \"User CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_ru_stime = Counter(\n \"synapse_background_process_ru_stime_seconds\",\n \"System CPU time used by background processes, in seconds\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_count = Counter(\n \"synapse_background_process_db_txn_count\",\n \"Number of database transactions done by background processes\",\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_txn_duration = Counter(\n \"synapse_background_process_db_txn_duration_seconds\",\n (\"Seconds spent by background processes waiting for database \"\n \"transactions, excluding scheduling time\"),\n [\"name\"],\n registry=None,\n)\n\n_background_process_db_sched_duration = Counter(\n \"synapse_background_process_db_sched_duration_seconds\",\n \"Seconds spent by background processes waiting for database connections\",\n [\"name\"],\n registry=None,\n)\n\n# map from description to a counter, so that we can name our logcontexts\n# incrementally. 
(It actually duplicates _background_process_start_count, but\n# it's much simpler to do so than to try to combine them.)\n_background_process_counts = dict() # type: dict[str, int]\n\n# map from description to the currently running background processes.\n#\n# it's kept as a dict of sets rather than a big set so that we can keep track\n# of process descriptions that no longer have any active processes.\n_background_processes = dict() # type: dict[str, set[_BackgroundProcess]]\n\n# A lock that covers the above dicts\n_bg_metrics_lock = threading.Lock()\n\n\nclass _Collector(object):\n \"\"\"A custom metrics collector for the background process metrics.\n\n Ensures that all of the metrics are up-to-date with any in-flight processes\n before they are returned.\n \"\"\"\n def collect(self):\n background_process_in_flight_count = GaugeMetricFamily(\n \"synapse_background_process_in_flight_count\",\n \"Number of background processes in flight\",\n labels=[\"name\"],\n )\n\n # We copy the dict so that it doesn't change from underneath us\n with _bg_metrics_lock:\n _background_processes_copy = dict(_background_processes)\n\n for desc, processes in six.iteritems(_background_processes_copy):\n background_process_in_flight_count.add_metric(\n (desc,), len(processes),\n )\n for process in processes:\n process.update_metrics()\n\n yield background_process_in_flight_count\n\n # now we need to run collect() over each of the static Counters, and\n # yield each metric they return.\n for m in (\n _background_process_ru_utime,\n _background_process_ru_stime,\n _background_process_db_txn_count,\n _background_process_db_txn_duration,\n _background_process_db_sched_duration,\n ):\n for r in m.collect():\n yield r\n\n\nREGISTRY.register(_Collector())\n\n\nclass _BackgroundProcess(object):\n def __init__(self, desc, ctx):\n self.desc = desc\n self._context = ctx\n self._reported_stats = None\n\n def update_metrics(self):\n \"\"\"Updates the metrics with values from this process.\"\"\"\n new_stats = self._context.get_resource_usage()\n if self._reported_stats is None:\n diff = new_stats\n else:\n diff = new_stats - self._reported_stats\n self._reported_stats = new_stats\n\n _background_process_ru_utime.labels(self.desc).inc(diff.ru_utime)\n _background_process_ru_stime.labels(self.desc).inc(diff.ru_stime)\n _background_process_db_txn_count.labels(self.desc).inc(\n diff.db_txn_count,\n )\n _background_process_db_txn_duration.labels(self.desc).inc(\n diff.db_txn_duration_sec,\n )\n _background_process_db_sched_duration.labels(self.desc).inc(\n diff.db_sched_duration_sec,\n )\n\n\ndef run_as_background_process(desc, func, *args, **kwargs):\n \"\"\"Run the given function in its own logcontext, with resource metrics\n\n This should be used to wrap processes which are fired off to run in the\n background, instead of being associated with a particular request.\n\n It returns a Deferred which completes when the function completes, but it doesn't\n follow the synapse logcontext rules, which makes it appropriate for passing to\n clock.looping_call and friends (or for firing-and-forgetting in the middle of a\n normal synapse inlineCallbacks function).\n\n Args:\n desc (str): a description for this background process type\n func: a function, which may return a Deferred\n args: positional args for func\n kwargs: keyword args for func\n\n Returns: Deferred which returns the result of func, but note that it does not\n follow the synapse logcontext rules.\n \"\"\"\n @defer.inlineCallbacks\n def run():\n with _bg_metrics_lock:\n 
count = _background_process_counts.get(desc, 0)\n _background_process_counts[desc] = count + 1\n\n _background_process_start_count.labels(desc).inc()\n\n with LoggingContext(desc) as context:\n context.request = \"%s-%i\" % (desc, count)\n proc = _BackgroundProcess(desc, context)\n\n with _bg_metrics_lock:\n _background_processes.setdefault(desc, set()).add(proc)\n\n try:\n yield func(*args, **kwargs)\n except Exception:\n logger.exception(\"Background process '%s' threw an exception\", desc)\n finally:\n proc.update_metrics()\n\n with _bg_metrics_lock:\n _background_processes[desc].remove(proc)\n\n with PreserveLoggingContext():\n return run()\n", "path": "synapse/metrics/background_process_metrics.py"}]} | 2,252 | 207 |
gh_patches_debug_21192 | rasdani/github-patches | git_diff | bokeh__bokeh-7000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incomplete docs re embedding when using data tables
Hi, I banged my head a bit trying to use `components` to render tables and failing while `file_html` would work well.
Turns out we now need to include:
```html
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.9.min.css" type="text/css" />
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.9.min.js"></script>
```
in order for data tables to render properly and this is not mentioned in the embed sections of the User Guide or the Reference.
Having a look right now how the docs are built to see if I can provide help with a PR if that's not fixed already.
EDIT: Going through the docs to prepare a PR, I read in the 0.12.7 release notes that the change should be transparent as Bokeh would load the `-tables` resources automatically when necessary. It wasn't the case for me. But maybe it wasn't meant for the `components` use case?
I will provide a PR later today. Just waiting for feedback on that last question to see whether this paragraph in the 0.12.7 release notes should be reworded if `components` is not covered by the automatic process.
--- END ISSUE ---
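A minimal sketch of a standalone page that pairs `components()` output with the widgets and tables bundles the issue calls for; the DataTable contents, file name, and the 0.12.9 pin are illustrative only:

```python
# Sketch only: render a DataTable with components() and write a page that pulls
# in the -widgets and -tables bundles alongside core BokehJS.
from bokeh.embed import components
from bokeh.models import ColumnDataSource, DataTable, TableColumn

source = ColumnDataSource(data=dict(x=[1, 2, 3], y=["a", "b", "c"]))
table = DataTable(source=source,
                  columns=[TableColumn(field="x", title="x"),
                           TableColumn(field="y", title="y")])

script, div = components(table)

page = """<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-0.12.9.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.9.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.9.min.css" type="text/css" />
<script src="https://cdn.pydata.org/bokeh/release/bokeh-0.12.9.min.js"></script>
<script src="https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.9.min.js"></script>
<script src="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.9.min.js"></script>
{script}
</head>
<body>{div}</body>
</html>""".format(script=script, div=div)

with open("table_page.html", "w") as f:
    f.write(page)
```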
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/embed/standalone.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
3 #
4 # Powered by the Bokeh Development Team.
5 #
6 # The full license is in the file LICENSE.txt, distributed with this software.
7 #-----------------------------------------------------------------------------
8 '''
9
10 '''
11
12 #-----------------------------------------------------------------------------
13 # Boilerplate
14 #----------------------------------------------------------------------------
15 from __future__ import absolute_import, division, print_function, unicode_literals
16
17 import logging
18 log = logging.getLogger(__name__)
19
20 from bokeh.util.api import public, internal ; public, internal
21
22 #-----------------------------------------------------------------------------
23 # Imports
24 #-----------------------------------------------------------------------------
25
26 # Standard library imports
27 from contextlib import contextmanager
28 import re
29
30 # External imports
31
32 # Bokeh imports
33 from ..core.templates import AUTOLOAD_JS, AUTOLOAD_TAG, FILE
34 from ..document.document import DEFAULT_TITLE, Document
35 from ..model import Model
36 from ..settings import settings
37 from ..util.compiler import bundle_all_models
38 from ..util.string import encode_utf8
39 from .bundle import bundle_for_objs_and_resources
40 from .util import FromCurdoc
41 from .util import (check_models_or_docs, check_one_model_or_doc, div_for_render_item, find_existing_docs, html_page_for_render_items,
42 script_for_render_items, standalone_docs_json_and_render_items, wrap_in_onload, wrap_in_script_tag)
43
44 #-----------------------------------------------------------------------------
45 # Globals and constants
46 #-----------------------------------------------------------------------------
47
48 #-----------------------------------------------------------------------------
49 # Public API
50 #-----------------------------------------------------------------------------
51
52 @public((1,0,0))
53 def autoload_static(model, resources, script_path):
54 ''' Return JavaScript code and a script tag that can be used to embed
55 Bokeh Plots.
56
57 The data for the plot is stored directly in the returned JavaScript code.
58
59 Args:
60 model (Model or Document) :
61
62 resources (Resources) :
63
64 script_path (str) :
65
66 Returns:
67 (js, tag) :
68 JavaScript code to be saved at ``script_path`` and a ``<script>``
69 tag to load it
70
71 Raises:
72 ValueError
73
74 '''
75 # TODO: maybe warn that it's not exactly useful, but technically possible
76 # if resources.mode == 'inline':
77 # raise ValueError("autoload_static() requires non-inline resources")
78
79 model = check_one_model_or_doc(model)
80
81 with _ModelInDocument([model]):
82 (docs_json, render_items) = standalone_docs_json_and_render_items([model])
83
84 bundle = bundle_all_models()
85 script = script_for_render_items(docs_json, render_items)
86 item = render_items[0]
87
88 js = wrap_in_onload(AUTOLOAD_JS.render(
89 js_urls = resources.js_files,
90 css_urls = resources.css_files,
91 js_raw = resources.js_raw + [bundle, script],
92 css_raw = resources.css_raw_str,
93 elementid = item['elementid'],
94 ))
95
96 tag = AUTOLOAD_TAG.render(
97 src_path = script_path,
98 elementid = item['elementid'],
99 modelid = item.get('modelid', ''),
100 docid = item.get('docid', ''),
101 )
102
103 return encode_utf8(js), encode_utf8(tag)
104
105 @public((1,0,0))
106 def components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc):
107 ''' Return HTML components to embed a Bokeh plot. The data for the plot is
108 stored directly in the returned HTML.
109
110 An example can be found in examples/embed/embed_multiple.py
111
112 .. note::
113 The returned components assume that BokehJS resources are
114 **already loaded**.
115
116 Args:
117 models (Model|list|dict|tuple) :
118 A single Model, a list/tuple of Models, or a dictionary of keys and Models.
119
120 wrap_script (boolean, optional) :
121 If True, the returned javascript is wrapped in a script tag.
122 (default: True)
123
124 wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
125 Otherwise, return dicts that can be used to build your own divs.
126 (default: True)
127
128 If False, the returned dictionary contains the following information:
129
130 .. code-block:: python
131
132 {
133 'modelid': 'The model ID, used with Document.get_model_by_id',
134 'elementid': 'The css identifier the BokehJS will look for to target the plot',
135 'docid': 'Used by Bokeh to find the doc embedded in the returned script',
136 }
137
138 theme (Theme, optional) :
139 Defaults to the ``Theme`` instance in the current document.
140 Setting this to ``None`` uses the default theme or the theme
141 already specified in the document. Any other value must be an
142 instance of the ``Theme`` class.
143
144 Returns:
145 UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*
146
147 Examples:
148
149 With default wrapping parameter values:
150
151 .. code-block:: python
152
153 components(plot)
154 # => (script, plot_div)
155
156 components((plot1, plot2))
157 # => (script, (plot1_div, plot2_div))
158
159 components({"Plot 1": plot1, "Plot 2": plot2})
160 # => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})
161
162 Examples:
163
164 With wrapping parameters set to ``False``:
165
166 .. code-block:: python
167
168 components(plot, wrap_script=False, wrap_plot_info=False)
169 # => (javascript, plot_dict)
170
171 components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
172 # => (javascript, (plot1_dict, plot2_dict))
173
174 components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
175 # => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})
176
177 '''
178 # 1) Convert single items and dicts into list
179
180 was_single_object = isinstance(models, Model) or isinstance(models, Document)
181 # converts single to list
182 models = check_models_or_docs(models, allow_dict=True)
183 # now convert dict to list, saving keys in the same order
184 model_keys = None
185 if isinstance(models, dict):
186 model_keys = models.keys()
187 values = []
188 # don't just use .values() to ensure we are in the same order as key list
189 for k in model_keys:
190 values.append(models[k])
191 models = values
192
193 # 2) Append models to one document. Either pre-existing or new and render
194 with _ModelInDocument(models, apply_theme=theme):
195 (docs_json, render_items) = standalone_docs_json_and_render_items(models)
196
197 script = bundle_all_models()
198 script += script_for_render_items(docs_json, render_items)
199 if wrap_script:
200 script = wrap_in_script_tag(script)
201 script = encode_utf8(script)
202
203 if wrap_plot_info:
204 results = list(div_for_render_item(item) for item in render_items)
205 else:
206 results = render_items
207
208 # 3) convert back to the input shape
209
210 if was_single_object:
211 return script, results[0]
212 elif model_keys is not None:
213 result = {}
214 for (key, value) in zip(model_keys, results):
215 result[key] = value
216 return script, result
217 else:
218 return script, tuple(results)
219
220 @public((1,0,0))
221 def file_html(models,
222 resources,
223 title=None,
224 template=FILE,
225 template_variables={},
226 theme=FromCurdoc):
227 ''' Return an HTML document that embeds Bokeh Model or Document objects.
228
229 The data for the plot is stored directly in the returned HTML, with
230 support for customizing the JS/CSS resources independently and
231 customizing the jinja2 template.
232
233 Args:
234 models (Model or Document or list) : Bokeh object or objects to render
235 typically a Model or Document
236
237 resources (Resources or tuple(JSResources or None, CSSResources or None)) : i
238 A resource configuration for Bokeh JS & CSS assets.
239
240 title (str, optional) : a title for the HTML document ``<title>`` tags or None. (default: None)
241 If None, attempt to automatically find the Document title from the given plot objects.
242
243 template (Template, optional) : HTML document template (default: FILE)
244 A Jinja2 Template, see bokeh.core.templates.FILE for the required
245 template parameters
246
247 template_variables (dict, optional) : variables to be used in the Jinja2
248 template. If used, the following variable names will be overwritten:
249 title, bokeh_js, bokeh_css, plot_script, plot_div
250
251 theme (Theme, optional) :
252 Defaults to the ``Theme`` instance in the current document.
253 Setting this to ``None`` uses the default theme or the theme
254 already specified in the document. Any other value must be an
255 instance of the ``Theme`` class.
256
257 Returns:
258 UTF-8 encoded HTML
259
260 '''
261 models = check_models_or_docs(models)
262
263 with _ModelInDocument(models, apply_theme=theme):
264 (docs_json, render_items) = standalone_docs_json_and_render_items(models)
265 title = _title_from_models(models, title)
266 bundle = bundle_for_objs_and_resources(models, resources)
267 return html_page_for_render_items(bundle, docs_json, render_items, title=title,
268 template=template, template_variables=template_variables)
269
270 #-----------------------------------------------------------------------------
271 # Internal API
272 #-----------------------------------------------------------------------------
273
274 #-----------------------------------------------------------------------------
275 # Private API
276 #-----------------------------------------------------------------------------
277
278 @contextmanager
279 def _ModelInDocument(models, apply_theme=None):
280 doc = find_existing_docs(models)
281 old_theme = doc.theme
282
283 if apply_theme is FromCurdoc:
284 from ..io import curdoc; curdoc
285 doc.theme = curdoc().theme
286 elif apply_theme is not None:
287 doc.theme = apply_theme
288
289 models_to_dedoc = _add_doc_to_models(doc, models)
290
291 if settings.perform_document_validation():
292 doc.validate()
293
294 yield doc
295
296 for model in models_to_dedoc:
297 doc.remove_root(model, apply_theme)
298 doc.theme = old_theme
299
300 def _add_doc_to_models(doc, models):
301 models_to_dedoc = []
302 for model in models:
303 if isinstance(model, Model):
304 if model.document is None:
305 try:
306 doc.add_root(model)
307 models_to_dedoc.append(model)
308 except RuntimeError as e:
309 child = re.search('\((.*)\)', str(e)).group(0)
310 msg = ('Sub-model {0} of the root model {1} is already owned '
311 'by another document (Models must be owned by only a '
312 'single document). This may indicate a usage '
313 'error.'.format(child, model))
314 raise RuntimeError(msg)
315 return models_to_dedoc
316
317 def _title_from_models(models, title):
318 # use override title
319 if title is not None:
320 return title
321
322 # use title from any listed document
323 for p in models:
324 if isinstance(p, Document):
325 return p.title
326
327 # use title from any model's document
328 for p in models:
329 if p.document is not None:
330 return p.document.title
331
332 # use default title
333 return DEFAULT_TITLE
334
335 #-----------------------------------------------------------------------------
336 # Code
337 #-----------------------------------------------------------------------------
338
```
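For contrast with `components()`, here is a small sketch of the `file_html()` path the reporter says worked for them; it hands resource selection to the `CDN` object so the generated page carries its own asset links. The plot and output file name are placeholders:

```python
# Sketch only: produce a complete standalone HTML page for one figure.
from bokeh.embed import file_html
from bokeh.plotting import figure
from bokeh.resources import CDN

plot = figure(title="demo")
plot.line([1, 2, 3], [3, 1, 2])

html = file_html(plot, CDN, title="demo page")
with open("standalone.html", "w") as f:
    f.write(html)
```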
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/embed/standalone.py b/bokeh/embed/standalone.py
--- a/bokeh/embed/standalone.py
+++ b/bokeh/embed/standalone.py
@@ -109,9 +109,25 @@
 
     An example can be found in examples/embed/embed_multiple.py
 
-    .. note::
-        The returned components assume that BokehJS resources are
-        **already loaded**.
+    The returned components assume that BokehJS resources are **already loaded**.
+    The html template in which they will be embedded needs to include the following
+    links and scripts tags. The widgets and tables resources are only necessary if
+    the components make use of widgets and tables.
+
+    .. code-block:: html
+        <link
+            href="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.css"
+            rel="stylesheet" type="text/css">
+        <link
+            href="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.css"
+            rel="stylesheet" type="text/css">
+        <link
+            href="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.css"
+            rel="stylesheet" type="text/css">
+
+        <script src="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.js"></script>
+        <script src="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.js"></script>
+        <script src="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.js"></script>
 
     Args:
         models (Model|list|dict|tuple) :
All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom bokeh.util.api import public, internal ; public, internal\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nfrom contextlib import contextmanager\nimport re\n\n# External imports\n\n# Bokeh imports\nfrom ..core.templates import AUTOLOAD_JS, AUTOLOAD_TAG, FILE\nfrom ..document.document import DEFAULT_TITLE, Document\nfrom ..model import Model\nfrom ..settings import settings\nfrom ..util.compiler import bundle_all_models\nfrom ..util.string import encode_utf8\nfrom .bundle import bundle_for_objs_and_resources\nfrom .util import FromCurdoc\nfrom .util import (check_models_or_docs, check_one_model_or_doc, div_for_render_item, find_existing_docs, html_page_for_render_items,\n script_for_render_items, standalone_docs_json_and_render_items, wrap_in_onload, wrap_in_script_tag)\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Public API\n#-----------------------------------------------------------------------------\n\n@public((1,0,0))\ndef autoload_static(model, resources, script_path):\n ''' Return JavaScript code and a script tag that can be used to embed\n Bokeh Plots.\n\n The data for the plot is stored directly in the returned JavaScript code.\n\n Args:\n model (Model or Document) :\n\n resources (Resources) :\n\n script_path (str) :\n\n Returns:\n (js, tag) :\n JavaScript code to be saved at ``script_path`` and a ``<script>``\n tag to load it\n\n Raises:\n ValueError\n\n '''\n # TODO: maybe warn that it's not exactly useful, but technically possible\n # if resources.mode == 'inline':\n # raise ValueError(\"autoload_static() requires non-inline resources\")\n\n model = check_one_model_or_doc(model)\n\n with _ModelInDocument([model]):\n (docs_json, render_items) = standalone_docs_json_and_render_items([model])\n\n bundle = bundle_all_models()\n script = script_for_render_items(docs_json, render_items)\n item = render_items[0]\n\n js = wrap_in_onload(AUTOLOAD_JS.render(\n js_urls = resources.js_files,\n css_urls = resources.css_files,\n js_raw = resources.js_raw + [bundle, script],\n css_raw = resources.css_raw_str,\n elementid = item['elementid'],\n ))\n\n tag = AUTOLOAD_TAG.render(\n src_path = script_path,\n elementid = item['elementid'],\n modelid = item.get('modelid', ''),\n docid = item.get('docid', ''),\n )\n\n return encode_utf8(js), encode_utf8(tag)\n\n@public((1,0,0))\ndef components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc):\n ''' Return HTML components to embed a Bokeh plot. The data for the plot is\n stored directly in the returned HTML.\n\n An example can be found in examples/embed/embed_multiple.py\n\n .. 
note::\n The returned components assume that BokehJS resources are\n **already loaded**.\n\n Args:\n models (Model|list|dict|tuple) :\n A single Model, a list/tuple of Models, or a dictionary of keys and Models.\n\n wrap_script (boolean, optional) :\n If True, the returned javascript is wrapped in a script tag.\n (default: True)\n\n wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.\n Otherwise, return dicts that can be used to build your own divs.\n (default: True)\n\n If False, the returned dictionary contains the following information:\n\n .. code-block:: python\n\n {\n 'modelid': 'The model ID, used with Document.get_model_by_id',\n 'elementid': 'The css identifier the BokehJS will look for to target the plot',\n 'docid': 'Used by Bokeh to find the doc embedded in the returned script',\n }\n\n theme (Theme, optional) :\n Defaults to the ``Theme`` instance in the current document.\n Setting this to ``None`` uses the default theme or the theme\n already specified in the document. Any other value must be an\n instance of the ``Theme`` class.\n\n Returns:\n UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*\n\n Examples:\n\n With default wrapping parameter values:\n\n .. code-block:: python\n\n components(plot)\n # => (script, plot_div)\n\n components((plot1, plot2))\n # => (script, (plot1_div, plot2_div))\n\n components({\"Plot 1\": plot1, \"Plot 2\": plot2})\n # => (script, {\"Plot 1\": plot1_div, \"Plot 2\": plot2_div})\n\n Examples:\n\n With wrapping parameters set to ``False``:\n\n .. code-block:: python\n\n components(plot, wrap_script=False, wrap_plot_info=False)\n # => (javascript, plot_dict)\n\n components((plot1, plot2), wrap_script=False, wrap_plot_info=False)\n # => (javascript, (plot1_dict, plot2_dict))\n\n components({\"Plot 1\": plot1, \"Plot 2\": plot2}, wrap_script=False, wrap_plot_info=False)\n # => (javascript, {\"Plot 1\": plot1_dict, \"Plot 2\": plot2_dict})\n\n '''\n # 1) Convert single items and dicts into list\n\n was_single_object = isinstance(models, Model) or isinstance(models, Document)\n # converts single to list\n models = check_models_or_docs(models, allow_dict=True)\n # now convert dict to list, saving keys in the same order\n model_keys = None\n if isinstance(models, dict):\n model_keys = models.keys()\n values = []\n # don't just use .values() to ensure we are in the same order as key list\n for k in model_keys:\n values.append(models[k])\n models = values\n\n # 2) Append models to one document. 
Either pre-existing or new and render\n with _ModelInDocument(models, apply_theme=theme):\n (docs_json, render_items) = standalone_docs_json_and_render_items(models)\n\n script = bundle_all_models()\n script += script_for_render_items(docs_json, render_items)\n if wrap_script:\n script = wrap_in_script_tag(script)\n script = encode_utf8(script)\n\n if wrap_plot_info:\n results = list(div_for_render_item(item) for item in render_items)\n else:\n results = render_items\n\n # 3) convert back to the input shape\n\n if was_single_object:\n return script, results[0]\n elif model_keys is not None:\n result = {}\n for (key, value) in zip(model_keys, results):\n result[key] = value\n return script, result\n else:\n return script, tuple(results)\n\n@public((1,0,0))\ndef file_html(models,\n resources,\n title=None,\n template=FILE,\n template_variables={},\n theme=FromCurdoc):\n ''' Return an HTML document that embeds Bokeh Model or Document objects.\n\n The data for the plot is stored directly in the returned HTML, with\n support for customizing the JS/CSS resources independently and\n customizing the jinja2 template.\n\n Args:\n models (Model or Document or list) : Bokeh object or objects to render\n typically a Model or Document\n\n resources (Resources or tuple(JSResources or None, CSSResources or None)) : i\n A resource configuration for Bokeh JS & CSS assets.\n\n title (str, optional) : a title for the HTML document ``<title>`` tags or None. (default: None)\n If None, attempt to automatically find the Document title from the given plot objects.\n\n template (Template, optional) : HTML document template (default: FILE)\n A Jinja2 Template, see bokeh.core.templates.FILE for the required\n template parameters\n\n template_variables (dict, optional) : variables to be used in the Jinja2\n template. If used, the following variable names will be overwritten:\n title, bokeh_js, bokeh_css, plot_script, plot_div\n\n theme (Theme, optional) :\n Defaults to the ``Theme`` instance in the current document.\n Setting this to ``None`` uses the default theme or the theme\n already specified in the document. 
Any other value must be an\n instance of the ``Theme`` class.\n\n Returns:\n UTF-8 encoded HTML\n\n '''\n models = check_models_or_docs(models)\n\n with _ModelInDocument(models, apply_theme=theme):\n (docs_json, render_items) = standalone_docs_json_and_render_items(models)\n title = _title_from_models(models, title)\n bundle = bundle_for_objs_and_resources(models, resources)\n return html_page_for_render_items(bundle, docs_json, render_items, title=title,\n template=template, template_variables=template_variables)\n\n#-----------------------------------------------------------------------------\n# Internal API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n@contextmanager\ndef _ModelInDocument(models, apply_theme=None):\n doc = find_existing_docs(models)\n old_theme = doc.theme\n\n if apply_theme is FromCurdoc:\n from ..io import curdoc; curdoc\n doc.theme = curdoc().theme\n elif apply_theme is not None:\n doc.theme = apply_theme\n\n models_to_dedoc = _add_doc_to_models(doc, models)\n\n if settings.perform_document_validation():\n doc.validate()\n\n yield doc\n\n for model in models_to_dedoc:\n doc.remove_root(model, apply_theme)\n doc.theme = old_theme\n\ndef _add_doc_to_models(doc, models):\n models_to_dedoc = []\n for model in models:\n if isinstance(model, Model):\n if model.document is None:\n try:\n doc.add_root(model)\n models_to_dedoc.append(model)\n except RuntimeError as e:\n child = re.search('\\((.*)\\)', str(e)).group(0)\n msg = ('Sub-model {0} of the root model {1} is already owned '\n 'by another document (Models must be owned by only a '\n 'single document). This may indicate a usage '\n 'error.'.format(child, model))\n raise RuntimeError(msg)\n return models_to_dedoc\n\ndef _title_from_models(models, title):\n # use override title\n if title is not None:\n return title\n\n # use title from any listed document\n for p in models:\n if isinstance(p, Document):\n return p.title\n\n # use title from any model's document\n for p in models:\n if p.document is not None:\n return p.document.title\n\n # use default title\n return DEFAULT_TITLE\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/embed/standalone.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2017, Anaconda, Inc. 
All rights reserved.\n#\n# Powered by the Bokeh Development Team.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom bokeh.util.api import public, internal ; public, internal\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nfrom contextlib import contextmanager\nimport re\n\n# External imports\n\n# Bokeh imports\nfrom ..core.templates import AUTOLOAD_JS, AUTOLOAD_TAG, FILE\nfrom ..document.document import DEFAULT_TITLE, Document\nfrom ..model import Model\nfrom ..settings import settings\nfrom ..util.compiler import bundle_all_models\nfrom ..util.string import encode_utf8\nfrom .bundle import bundle_for_objs_and_resources\nfrom .util import FromCurdoc\nfrom .util import (check_models_or_docs, check_one_model_or_doc, div_for_render_item, find_existing_docs, html_page_for_render_items,\n script_for_render_items, standalone_docs_json_and_render_items, wrap_in_onload, wrap_in_script_tag)\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Public API\n#-----------------------------------------------------------------------------\n\n@public((1,0,0))\ndef autoload_static(model, resources, script_path):\n ''' Return JavaScript code and a script tag that can be used to embed\n Bokeh Plots.\n\n The data for the plot is stored directly in the returned JavaScript code.\n\n Args:\n model (Model or Document) :\n\n resources (Resources) :\n\n script_path (str) :\n\n Returns:\n (js, tag) :\n JavaScript code to be saved at ``script_path`` and a ``<script>``\n tag to load it\n\n Raises:\n ValueError\n\n '''\n # TODO: maybe warn that it's not exactly useful, but technically possible\n # if resources.mode == 'inline':\n # raise ValueError(\"autoload_static() requires non-inline resources\")\n\n model = check_one_model_or_doc(model)\n\n with _ModelInDocument([model]):\n (docs_json, render_items) = standalone_docs_json_and_render_items([model])\n\n bundle = bundle_all_models()\n script = script_for_render_items(docs_json, render_items)\n item = render_items[0]\n\n js = wrap_in_onload(AUTOLOAD_JS.render(\n js_urls = resources.js_files,\n css_urls = resources.css_files,\n js_raw = resources.js_raw + [bundle, script],\n css_raw = resources.css_raw_str,\n elementid = item['elementid'],\n ))\n\n tag = AUTOLOAD_TAG.render(\n src_path = script_path,\n elementid = item['elementid'],\n modelid = item.get('modelid', ''),\n docid = item.get('docid', ''),\n )\n\n return encode_utf8(js), encode_utf8(tag)\n\n@public((1,0,0))\ndef components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc):\n ''' Return HTML components to embed a Bokeh plot. 
The data for the plot is\n stored directly in the returned HTML.\n\n An example can be found in examples/embed/embed_multiple.py\n\n The returned components assume that BokehJS resources are **already loaded**.\n The html template in which they will be embedded needs to include the following\n links and scripts tags. The widgets and tables resources are only necessary if\n the components make use of widgets and tables.\n \n .. code-block:: html\n <link\n href=\"http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.css\"\n rel=\"stylesheet\" type=\"text/css\">\n <link\n href=\"http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.css\"\n rel=\"stylesheet\" type=\"text/css\">\n <link\n href=\"http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.css\"\n rel=\"stylesheet\" type=\"text/css\">\n\n <script src=\"http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.js\"></script>\n <script src=\"http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.js\"></script>\n <script src=\"http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.js\"></script>\n\n Args:\n models (Model|list|dict|tuple) :\n A single Model, a list/tuple of Models, or a dictionary of keys and Models.\n\n wrap_script (boolean, optional) :\n If True, the returned javascript is wrapped in a script tag.\n (default: True)\n\n wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.\n Otherwise, return dicts that can be used to build your own divs.\n (default: True)\n\n If False, the returned dictionary contains the following information:\n\n .. code-block:: python\n\n {\n 'modelid': 'The model ID, used with Document.get_model_by_id',\n 'elementid': 'The css identifier the BokehJS will look for to target the plot',\n 'docid': 'Used by Bokeh to find the doc embedded in the returned script',\n }\n\n theme (Theme, optional) :\n Defaults to the ``Theme`` instance in the current document.\n Setting this to ``None`` uses the default theme or the theme\n already specified in the document. Any other value must be an\n instance of the ``Theme`` class.\n\n Returns:\n UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*\n\n Examples:\n\n With default wrapping parameter values:\n\n .. code-block:: python\n\n components(plot)\n # => (script, plot_div)\n\n components((plot1, plot2))\n # => (script, (plot1_div, plot2_div))\n\n components({\"Plot 1\": plot1, \"Plot 2\": plot2})\n # => (script, {\"Plot 1\": plot1_div, \"Plot 2\": plot2_div})\n\n Examples:\n\n With wrapping parameters set to ``False``:\n\n .. code-block:: python\n\n components(plot, wrap_script=False, wrap_plot_info=False)\n # => (javascript, plot_dict)\n\n components((plot1, plot2), wrap_script=False, wrap_plot_info=False)\n # => (javascript, (plot1_dict, plot2_dict))\n\n components({\"Plot 1\": plot1, \"Plot 2\": plot2}, wrap_script=False, wrap_plot_info=False)\n # => (javascript, {\"Plot 1\": plot1_dict, \"Plot 2\": plot2_dict})\n\n '''\n # 1) Convert single items and dicts into list\n\n was_single_object = isinstance(models, Model) or isinstance(models, Document)\n # converts single to list\n models = check_models_or_docs(models, allow_dict=True)\n # now convert dict to list, saving keys in the same order\n model_keys = None\n if isinstance(models, dict):\n model_keys = models.keys()\n values = []\n # don't just use .values() to ensure we are in the same order as key list\n for k in model_keys:\n values.append(models[k])\n models = values\n\n # 2) Append models to one document. 
Either pre-existing or new and render\n with _ModelInDocument(models, apply_theme=theme):\n (docs_json, render_items) = standalone_docs_json_and_render_items(models)\n\n script = bundle_all_models()\n script += script_for_render_items(docs_json, render_items)\n if wrap_script:\n script = wrap_in_script_tag(script)\n script = encode_utf8(script)\n\n if wrap_plot_info:\n results = list(div_for_render_item(item) for item in render_items)\n else:\n results = render_items\n\n # 3) convert back to the input shape\n\n if was_single_object:\n return script, results[0]\n elif model_keys is not None:\n result = {}\n for (key, value) in zip(model_keys, results):\n result[key] = value\n return script, result\n else:\n return script, tuple(results)\n\n@public((1,0,0))\ndef file_html(models,\n resources,\n title=None,\n template=FILE,\n template_variables={},\n theme=FromCurdoc):\n ''' Return an HTML document that embeds Bokeh Model or Document objects.\n\n The data for the plot is stored directly in the returned HTML, with\n support for customizing the JS/CSS resources independently and\n customizing the jinja2 template.\n\n Args:\n models (Model or Document or list) : Bokeh object or objects to render\n typically a Model or Document\n\n resources (Resources or tuple(JSResources or None, CSSResources or None)) : i\n A resource configuration for Bokeh JS & CSS assets.\n\n title (str, optional) : a title for the HTML document ``<title>`` tags or None. (default: None)\n If None, attempt to automatically find the Document title from the given plot objects.\n\n template (Template, optional) : HTML document template (default: FILE)\n A Jinja2 Template, see bokeh.core.templates.FILE for the required\n template parameters\n\n template_variables (dict, optional) : variables to be used in the Jinja2\n template. If used, the following variable names will be overwritten:\n title, bokeh_js, bokeh_css, plot_script, plot_div\n\n theme (Theme, optional) :\n Defaults to the ``Theme`` instance in the current document.\n Setting this to ``None`` uses the default theme or the theme\n already specified in the document. 
Any other value must be an\n instance of the ``Theme`` class.\n\n Returns:\n UTF-8 encoded HTML\n\n '''\n models = check_models_or_docs(models)\n\n with _ModelInDocument(models, apply_theme=theme):\n (docs_json, render_items) = standalone_docs_json_and_render_items(models)\n title = _title_from_models(models, title)\n bundle = bundle_for_objs_and_resources(models, resources)\n return html_page_for_render_items(bundle, docs_json, render_items, title=title,\n template=template, template_variables=template_variables)\n\n#-----------------------------------------------------------------------------\n# Internal API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n@contextmanager\ndef _ModelInDocument(models, apply_theme=None):\n doc = find_existing_docs(models)\n old_theme = doc.theme\n\n if apply_theme is FromCurdoc:\n from ..io import curdoc; curdoc\n doc.theme = curdoc().theme\n elif apply_theme is not None:\n doc.theme = apply_theme\n\n models_to_dedoc = _add_doc_to_models(doc, models)\n\n if settings.perform_document_validation():\n doc.validate()\n\n yield doc\n\n for model in models_to_dedoc:\n doc.remove_root(model, apply_theme)\n doc.theme = old_theme\n\ndef _add_doc_to_models(doc, models):\n models_to_dedoc = []\n for model in models:\n if isinstance(model, Model):\n if model.document is None:\n try:\n doc.add_root(model)\n models_to_dedoc.append(model)\n except RuntimeError as e:\n child = re.search('\\((.*)\\)', str(e)).group(0)\n msg = ('Sub-model {0} of the root model {1} is already owned '\n 'by another document (Models must be owned by only a '\n 'single document). This may indicate a usage '\n 'error.'.format(child, model))\n raise RuntimeError(msg)\n return models_to_dedoc\n\ndef _title_from_models(models, title):\n # use override title\n if title is not None:\n return title\n\n # use title from any listed document\n for p in models:\n if isinstance(p, Document):\n return p.title\n\n # use title from any model's document\n for p in models:\n if p.document is not None:\n return p.document.title\n\n # use default title\n return DEFAULT_TITLE\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/embed/standalone.py"}]} | 3,938 | 391 |
gh_patches_debug_38587 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor function-based view to class-based
### Idea summary
Rewrite function-based view to class-based and add docstring
### Further details
In the accounts app's view, there is a function-based view called user_profile:
```python
@login_required
@full_profile
def user_profile(request, username=None):
if request.method == "GET":
if not username:
return HttpResponseRedirect(f"/profile/{request.user}")
else:
is_owner = username == request.user.username
try:
user = get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
return HttpResponseRedirect("/404")
form = ProfileEditForm(
initial={
"username": user.username,
"email": user.email,
"first_name": user.profile.first_name or None,
"last_name": user.profile.last_name or None,
"about_me": user.profile.about_me or None,
},
readonly=True,
)
data = {
"username": user,
"profile_image_form": UpdateProfileImage,
"form": form if is_owner else None,
"readonly": True,
}
return TemplateResponse(request, "account.html", data)
```
All views in this file are created using classes, so I think it would be great to rewrite this view to better stick to the code style. Moreover, this view has no docstring. I'm new to contributing to Open Source; I want to solve this issue, and I know how.
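For illustration, a minimal sketch of one way the class-based rewrite could look, using Django's `LoginRequiredMixin` and `method_decorator` (a sketch only, not the final implementation):

```python
# Sketch of a class-based equivalent of user_profile.
from django.contrib.auth.mixins import LoginRequiredMixin
from django.utils.decorators import method_decorator
from django.views import View

from core.custom_decorators import full_profile


class UserProfileView(LoginRequiredMixin, View):
    """Show a user's profile page to authenticated users."""

    @method_decorator(full_profile)
    def get(self, request, username=None):
        # The body of the old user_profile function goes here unchanged,
        # minus the request.method == "GET" check, which View handles.
        ...
```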
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/accounts/urls/urls.py`
Content:
```
1 from django.urls import path
2 from django.contrib.auth import views as auth_views
3 from accounts.views import (
4 RegisterView,
5 SettingsView,
6 ProfileActivationView,
7 PasswordResetView,
8 PasswordResetDoneView,
9 PasswordResetConfirmView,
10 PasswordResetCompleteView,
11 ProfileSetupView,
12 user_profile,
13 )
14
15 urlpatterns = [
16 path(
17 "login/",
18 auth_views.LoginView.as_view(template_name="accounts/register/login.html"),
19 name="accounts_login",
20 ),
21 path("logout/", auth_views.LogoutView.as_view(), name="accounts_logout"),
22 path("register/", RegisterView.as_view(), name="accounts_register"),
23 path("settings/", SettingsView.as_view(), name="accounts_settings"),
24 path("setup/", ProfileSetupView.as_view(), name="accounts_profile_setup"),
25 path("profile/<str:username>/", user_profile, name="profile"),
26 path(
27 "activate_account/<uidb64>/<token>/",
28 ProfileActivationView.as_view(),
29 name="accounts_activate",
30 ),
31 path(
32 "accounts/password_reset/",
33 PasswordResetView.as_view(),
34 name="accounts_password_reset",
35 ),
36 path(
37 "accounts/password_reset_done/",
38 PasswordResetDoneView.as_view(),
39 name="accounts_password_reset_done",
40 ),
41 path(
42 "accounts/password_reset_confirm/<uidb64>/<token>/",
43 PasswordResetConfirmView.as_view(),
44 name="accounts_password_reset_confirm",
45 ),
46 path(
47 "accounts/password_reset_complete/",
48 PasswordResetCompleteView.as_view(),
49 name="accounts_password_reset_complete",
50 ),
51 ]
52
```
Path: `project/accounts/views.py`
Content:
```
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from core.custom_decorators import full_profile, login_required
8 from django.conf import settings
9 from django.contrib.auth import get_user_model, login
10 from django.contrib.auth import views as auth_views
11 from django.contrib.auth.mixins import LoginRequiredMixin
12 from django.contrib.sites.shortcuts import get_current_site
13 from django.http import HttpResponseRedirect
14 from django.template.response import TemplateResponse
15 from django.urls import reverse_lazy
16 from django.utils.encoding import force_str
17 from django.utils.http import urlsafe_base64_decode
18 from django.views import View
19 from django.views.generic.edit import FormView, UpdateView
20
21 from accounts.authentication import account_activation_token, send_activation_email
22 from accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm
23 from accounts.models import Profile
24
25
26 class RegisterView(FormView):
27 """
28 A form view that handles user registration.
29 """
30
31 template_name = "accounts/register/register.html"
32 form_class = UserRegistrationForm
33 success_url = "/"
34
35 def _create_user(self, form):
36 username = form.cleaned_data["username"]
37 password = form.cleaned_data["password"]
38 email = form.cleaned_data["email"]
39 user = get_user_model().objects.create_user(username, email, password)
40 return user
41
42 def _send_email(self, user):
43 domain = get_current_site(self.request).domain
44 send_activation_email(user, domain)
45
46 def _login(self, user):
47 login(self.request, user)
48
49 def form_valid(self, form):
50 user = self._create_user(form)
51
52 self._send_email(user)
53 self._login(user)
54
55 return super(RegisterView, self).form_valid(form)
56
57
58 class PasswordResetView(auth_views.PasswordResetView):
59 template_name = "accounts/users/password_reset.html"
60 email_template_name = "accounts/users/password_reset_email.html"
61 subject_template_name = "accounts/users/password_reset_subject.txt"
62 from_email = settings.EMAIL_HOST_USER
63 success_url = reverse_lazy("accounts_password_reset_done")
64
65
66 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
67 template_name = "accounts/users/password_reset_done.html"
68
69
70 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
71 template_name = "accounts/users/password_reset_confirm.html"
72 success_url = reverse_lazy("accounts_password_reset_complete")
73
74
75 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
76 template_name = "accounts/users/password_reset_complete.html"
77
78
79 class SettingsView(LoginRequiredMixin, UpdateView):
80 """A form view to edit Profile"""
81
82 login_url = "accounts_login"
83 form_class = ProfileEditForm
84 success_url = reverse_lazy("accounts_settings")
85 template_name = "accounts/update_settings.html"
86
87 def get_object(self, queryset=None):
88 return Profile.objects.get(user=self.request.user)
89
90 def get_initial(self):
91 profile = Profile.objects.get(user=self.request.user)
92 self.initial.update(
93 {
94 "username": profile.user.username,
95 "email": profile.user.email,
96 "first_name": profile.first_name or None,
97 "last_name": profile.last_name or None,
98 "about_me": profile.about_me or None,
99 }
100 )
101 return super(SettingsView, self).get_initial()
102
103
104 class ProfileActivationView(View):
105 """
106 This shows different views to the user when they are verifying
107 their account based on whether they are already verified or not.
108 """
109
110 def get(self, request, uidb64, token):
111
112 try:
113 uid = force_str(urlsafe_base64_decode(uidb64))
114 user = get_user_model().objects.get(pk=uid)
115
116 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
117 user = None
118
119 if user is not None and account_activation_token.check_token(user, token):
120 profile = user.profile
121 if profile.is_verified:
122 redirect_link = {"href": "/", "label": "Back to Main"}
123 template_var = {
124 "title": "Email Already Verified",
125 "content": "You have already verified your email",
126 "link": redirect_link,
127 }
128 else:
129 profile.is_verified = True
130 profile.save()
131
132 redirect_link = {"href": "/", "label": "Back to Main"}
133 template_var = {
134 "title": "Email Verification Successful",
135 "content": "Thank you for verifying your email with CiviWiki",
136 "link": redirect_link,
137 }
138 else:
139 # invalid link
140 redirect_link = {"href": "/", "label": "Back to Main"}
141 template_var = {
142 "title": "Email Verification Error",
143 "content": "Email could not be verified",
144 "link": redirect_link,
145 }
146
147 return TemplateResponse(request, "general_message.html", template_var)
148
149
150 class ProfileSetupView(LoginRequiredMixin, View):
151 """A view to make the user profile full_profile"""
152
153 login_url = "accounts_login"
154
155 def get(self, request):
156 profile = Profile.objects.get(user=request.user)
157 if profile.full_profile:
158 return HttpResponseRedirect("/")
159 # start temp rep rendering TODO: REMOVE THIS
160 else:
161 data = {
162 "username": request.user.username,
163 "email": request.user.email,
164 }
165 return TemplateResponse(request, "accounts/user-setup.html", data)
166
167
168 @login_required
169 @full_profile
170 def user_profile(request, username=None):
171 if request.method == "GET":
172 if not username:
173 return HttpResponseRedirect(f"/profile/{request.user}")
174 else:
175 is_owner = username == request.user.username
176 try:
177 user = get_user_model().objects.get(username=username)
178 except get_user_model().DoesNotExist:
179 return HttpResponseRedirect("/404")
180
181 form = ProfileEditForm(
182 initial={
183 "username": user.username,
184 "email": user.email,
185 "first_name": user.profile.first_name or None,
186 "last_name": user.profile.last_name or None,
187 "about_me": user.profile.about_me or None,
188 },
189 readonly=True,
190 )
191 data = {
192 "username": user,
193 "profile_image_form": UpdateProfileImage,
194 "form": form if is_owner else None,
195 "readonly": True,
196 }
197 return TemplateResponse(request, "account.html", data)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/project/accounts/urls/urls.py b/project/accounts/urls/urls.py
--- a/project/accounts/urls/urls.py
+++ b/project/accounts/urls/urls.py
@@ -9,7 +9,7 @@
PasswordResetConfirmView,
PasswordResetCompleteView,
ProfileSetupView,
- user_profile,
+ UserProfileView,
)
urlpatterns = [
@@ -22,7 +22,7 @@
path("register/", RegisterView.as_view(), name="accounts_register"),
path("settings/", SettingsView.as_view(), name="accounts_settings"),
path("setup/", ProfileSetupView.as_view(), name="accounts_profile_setup"),
- path("profile/<str:username>/", user_profile, name="profile"),
+ path("profile/<str:username>/", UserProfileView.as_view(), name="profile"),
path(
"activate_account/<uidb64>/<token>/",
ProfileActivationView.as_view(),
diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -4,7 +4,7 @@
This module will include views for the accounts app.
"""
-from core.custom_decorators import full_profile, login_required
+from core.custom_decorators import full_profile
from django.conf import settings
from django.contrib.auth import get_user_model, login
from django.contrib.auth import views as auth_views
@@ -15,6 +15,7 @@
from django.urls import reverse_lazy
from django.utils.encoding import force_str
from django.utils.http import urlsafe_base64_decode
+from django.utils.decorators import method_decorator
from django.views import View
from django.views.generic.edit import FormView, UpdateView
@@ -165,16 +166,18 @@
return TemplateResponse(request, "accounts/user-setup.html", data)
-@login_required
-@full_profile
-def user_profile(request, username=None):
- if request.method == "GET":
+class UserProfileView(LoginRequiredMixin, View):
+ """A view that shows profile for authorized users"""
+
+ @method_decorator(full_profile)
+ def get(self, request, username=None):
if not username:
return HttpResponseRedirect(f"/profile/{request.user}")
else:
is_owner = username == request.user.username
try:
user = get_user_model().objects.get(username=username)
+
except get_user_model().DoesNotExist:
return HttpResponseRedirect("/404")
| {"golden_diff": "diff --git a/project/accounts/urls/urls.py b/project/accounts/urls/urls.py\n--- a/project/accounts/urls/urls.py\n+++ b/project/accounts/urls/urls.py\n@@ -9,7 +9,7 @@\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n ProfileSetupView,\n- user_profile,\n+ UserProfileView,\n )\n \n urlpatterns = [\n@@ -22,7 +22,7 @@\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\"setup/\", ProfileSetupView.as_view(), name=\"accounts_profile_setup\"),\n- path(\"profile/<str:username>/\", user_profile, name=\"profile\"),\n+ path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\ndiff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -4,7 +4,7 @@\n This module will include views for the accounts app.\n \"\"\"\n \n-from core.custom_decorators import full_profile, login_required\n+from core.custom_decorators import full_profile\n from django.conf import settings\n from django.contrib.auth import get_user_model, login\n from django.contrib.auth import views as auth_views\n@@ -15,6 +15,7 @@\n from django.urls import reverse_lazy\n from django.utils.encoding import force_str\n from django.utils.http import urlsafe_base64_decode\n+from django.utils.decorators import method_decorator\n from django.views import View\n from django.views.generic.edit import FormView, UpdateView\n \n@@ -165,16 +166,18 @@\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n \n \n-@login_required\n-@full_profile\n-def user_profile(request, username=None):\n- if request.method == \"GET\":\n+class UserProfileView(LoginRequiredMixin, View):\n+ \"\"\"A view that shows profile for authorized users\"\"\"\n+\n+ @method_decorator(full_profile)\n+ def get(self, request, username=None):\n if not username:\n return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = get_user_model().objects.get(username=username)\n+\n except get_user_model().DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n", "issue": "Refactor function-based view to class-based\n### Idea summary\n\nRewrite function-based view to class-based and add docstring\n\n### Further details\n\nIn the accounts app's view, there is a function-based view called user_profile: \r\n```python\r\n@login_required\r\n@full_profile\r\ndef user_profile(request, username=None):\r\n if request.method == \"GET\":\r\n if not username:\r\n return HttpResponseRedirect(f\"/profile/{request.user}\")\r\n else:\r\n is_owner = username == request.user.username\r\n try:\r\n user = get_user_model().objects.get(username=username)\r\n except get_user_model().DoesNotExist:\r\n return HttpResponseRedirect(\"/404\")\r\n\r\n form = ProfileEditForm(\r\n initial={\r\n \"username\": user.username,\r\n \"email\": user.email,\r\n \"first_name\": user.profile.first_name or None,\r\n \"last_name\": user.profile.last_name or None,\r\n \"about_me\": user.profile.about_me or None,\r\n },\r\n readonly=True,\r\n )\r\n data = {\r\n \"username\": user,\r\n \"profile_image_form\": UpdateProfileImage,\r\n \"form\": form if is_owner else None,\r\n \"readonly\": True,\r\n }\r\n return TemplateResponse(request, \"account.html\", data)\r\n```\r\nAll views in this file are created using classes, so I think it will be great to 
rewrite this view for better sticking to the code style. Moreover, this view has no docstring. I'm new to contributing to Open Source. I want to solve this issue, and I know-how. \n", "before_files": [{"content": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom accounts.views import (\n RegisterView,\n SettingsView,\n ProfileActivationView,\n PasswordResetView,\n PasswordResetDoneView,\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n ProfileSetupView,\n user_profile,\n)\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\"setup/\", ProfileSetupView.as_view(), name=\"accounts_profile_setup\"),\n path(\"profile/<str:username>/\", user_profile, name=\"profile\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n]\n", "path": "project/accounts/urls/urls.py"}, {"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom core.custom_decorators import full_profile, login_required\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm\nfrom accounts.models import Profile\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass 
PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/update_settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = \"accounts_login\"\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n\n\n@login_required\n@full_profile\ndef user_profile(request, username=None):\n if request.method == \"GET\":\n if 
not username:\n return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = get_user_model().objects.get(username=username)\n except get_user_model().DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n\n form = ProfileEditForm(\n initial={\n \"username\": user.username,\n \"email\": user.email,\n \"first_name\": user.profile.first_name or None,\n \"last_name\": user.profile.last_name or None,\n \"about_me\": user.profile.about_me or None,\n },\n readonly=True,\n )\n data = {\n \"username\": user,\n \"profile_image_form\": UpdateProfileImage,\n \"form\": form if is_owner else None,\n \"readonly\": True,\n }\n return TemplateResponse(request, \"account.html\", data)\n", "path": "project/accounts/views.py"}], "after_files": [{"content": "from django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom accounts.views import (\n RegisterView,\n SettingsView,\n ProfileActivationView,\n PasswordResetView,\n PasswordResetDoneView,\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n ProfileSetupView,\n UserProfileView,\n)\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\"setup/\", ProfileSetupView.as_view(), name=\"accounts_profile_setup\"),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n]\n", "path": "project/accounts/urls/urls.py"}, {"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom core.custom_decorators import full_profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm\nfrom accounts.models import Profile\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = 
UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/update_settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n\n return TemplateResponse(request, 
\"general_message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = \"accounts_login\"\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n @method_decorator(full_profile)\n def get(self, request, username=None):\n if not username:\n return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = get_user_model().objects.get(username=username)\n\n except get_user_model().DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n\n form = ProfileEditForm(\n initial={\n \"username\": user.username,\n \"email\": user.email,\n \"first_name\": user.profile.first_name or None,\n \"last_name\": user.profile.last_name or None,\n \"about_me\": user.profile.about_me or None,\n },\n readonly=True,\n )\n data = {\n \"username\": user,\n \"profile_image_form\": UpdateProfileImage,\n \"form\": form if is_owner else None,\n \"readonly\": True,\n }\n return TemplateResponse(request, \"account.html\", data)\n", "path": "project/accounts/views.py"}]} | 2,843 | 527 |
gh_patches_debug_22239 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2359 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attendee list bug when adding or removing users
<!-- If this is a security issue or information leakage (having access to something you (probably) shouldn't), please send an email rather than opening a public issue. -->
## What kind of an issue is this?
- Bug report
## What is the expected behaviour?
Every box and number should appear under its corresponding column, so the list looks normal. Even if we remove or add users through the dashboard, nothing should change.

## What is the current behaviour?
If you remove or add a user through the dashboard menu on the attendee list, it will look like the screenshot above. Two boxes appear at random, the x for removing users sits all the way to the right, and the text "none" occupies the remove column. If you refresh the site it goes back to the expected behaviour; it only happens after deleting/adding a user.

<!-- if this is a bug report -->
## How do you reproduce this problem?
Remove or add a user to the attendee list manually.
<!-- if this is a bug report -->
<!-- provide steps to reproduce this problem, preferably in a bullet point list -->
1. go to the attendee list
2. Add a user by writing their name OR remove a user from the list
## Other information
This might be a bug that I didn't catch when I added "year of study" to the attendee list. I'm not sure if this was an issue before, but since it hasn't been brought up, I will assume it is a bug from that pull request of mine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/events/dashboard/utils.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.urls import reverse
3
4 from apps.authentication.models import OnlineUser as User
5 from apps.events.models import Attendee, Event
6
7
8 def _get_attendee(attendee_id):
9 try:
10 return Attendee.objects.get(pk=attendee_id)
11 except Attendee.DoesNotExist:
12 return None
13
14
15 def event_ajax_handler(event: Event, request):
16 action = request.POST.get('action')
17 administrating_user = request.user
18 attendee_id = request.POST.get('attendee_id')
19 user_id = request.POST.get('user_id')
20
21 if action == 'attended':
22 attendee = _get_attendee(attendee_id)
23 if not attendee:
24 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400}
25 return handle_attended(attendee)
26 elif action == 'paid':
27 attendee = _get_attendee(attendee_id)
28 if not attendee:
29 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400}
30 return handle_paid(attendee)
31 elif action == 'add_attendee':
32 return handle_add_attendee(event, user_id)
33 elif action == 'remove_attendee':
34 return handle_remove_attendee(event, attendee_id, administrating_user)
35 else:
36 raise NotImplementedError
37
38
39 def handle_attended(attendee: Attendee):
40 """
41 Toggle attending-status of an attendee between attending and not attending
42 """
43 attendee.attended = not attendee.attended
44 attendee.save()
45
46 return {'message': 'OK', 'status': 200}
47
48
49 def handle_paid(attendee: Attendee):
50 """
51 Toggle paid status of an attendee between paid and not paid
52 """
53 attendee.paid = not attendee.paid
54 attendee.save()
55
56 return {'message': 'OK', 'status': 200}
57
58
59 def _get_attendee_data(attendee_qs):
60 attendees = []
61
62 for number, a in enumerate(attendee_qs):
63 attendees.append({
64 'number': number + 1,
65 'id': a.id,
66 'first_name': a.user.first_name,
67 'last_name': a.user.last_name,
68 'paid': a.paid,
69 'extras': str(a.extras),
70 'attended': a.attended,
71 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})
72 })
73
74 return attendees
75
76
77 def _get_event_context(event: Event, response={}):
78 response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)
79 response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)
80
81 return response
82
83
84 def handle_add_attendee(event: Event, user_id: int):
85 resp = _get_event_context(event)
86 if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:
87 if not event.attendance_event.waitlist:
88 return {'message': f'Det er ingen ledige plasser på {event.title}.', 'status': 400, **resp}
89
90 user = User.objects.filter(pk=user_id)
91 if user.count() != 1:
92 return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}
93 user = user[0]
94 if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:
95 return {'message': f'{user} er allerede påmeldt {event.title}.', 'status': 400, **resp}
96
97 attendee = Attendee(user=user, event=event.attendance_event)
98 attendee.save()
99
100 resp = _get_event_context(event, resp)
101 return {'message': f'{user} ble meldt på {event}', 'status': 200, **resp}
102
103
104 def handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):
105 resp = _get_event_context(event)
106 attendee = Attendee.objects.filter(pk=attendee_id)
107 if attendee.count() != 1:
108 return {'message': f'Fant ingen påmeldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}
109 attendee = attendee[0]
110 attendee.unattend(admin_user)
111
112 resp = _get_event_context(event, resp)
113 return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/events/dashboard/utils.py b/apps/events/dashboard/utils.py
--- a/apps/events/dashboard/utils.py
+++ b/apps/events/dashboard/utils.py
@@ -65,7 +65,9 @@
'id': a.id,
'first_name': a.user.first_name,
'last_name': a.user.last_name,
+ 'year_of_study': a.user.year,
'paid': a.paid,
+ 'payment_deadline': a.get_payment_deadline(),
'extras': str(a.extras),
'attended': a.attended,
'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})
@@ -77,6 +79,8 @@
def _get_event_context(event: Event, response={}):
response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)
response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)
+ response['is_payment_event'] = bool(event.attendance_event.payment())
+ response['has_extras'] = event.attendance_event.has_extras
return response
| {"golden_diff": "diff --git a/apps/events/dashboard/utils.py b/apps/events/dashboard/utils.py\n--- a/apps/events/dashboard/utils.py\n+++ b/apps/events/dashboard/utils.py\n@@ -65,7 +65,9 @@\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n+ 'year_of_study': a.user.year,\n 'paid': a.paid,\n+ 'payment_deadline': a.get_payment_deadline(),\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n@@ -77,6 +79,8 @@\n def _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n+ response['is_payment_event'] = bool(event.attendance_event.payment())\n+ response['has_extras'] = event.attendance_event.has_extras\n \n return response\n", "issue": "Atendee list bug when adding or removing users\n<!-- If this is a security issue or information leakage (having access to something you (probably) shouldn't), please send an email rather than opening a public issue. -->\r\n\r\n## What kind of an issue is this?\r\n\r\n- Bug report\r\n\r\n\r\n## What is the expected behaviour?\r\nIt should look normal where every box and number is under its corresponding column. It should just look normal. Even if we remove users or add users through the dashboard, nothing should change.\r\n\r\n\r\n\r\n\r\n## What is the current behaviour?\r\nIf you remove or add a user through the dashboard menu at the attendee list it will look like the screenshot above. We have randomly two boxes, the x for removing users is all the way to the right and the text \"none\" is occupying the remove column. If you refresh the site it will go back to expected behaviour, its only after deleteing/adding a user\r\n\r\n\r\n<!-- if this is a bug report -->\r\n\r\n\r\n## How do you reproduce this problem? \r\nRemove or add a user to the attendee list manually.\r\n<!-- if this is a bug report -->\r\n<!-- provide steps to reproduce this problem, preferably in a bullet point list -->\r\n1. go to the attendee list\r\n2. Add a user by writing their name OR remove a user from the list\r\n## Other information\r\n\r\nThis might be a bug which I didn't catch when I added \"year of study\" to the attendee list. 
I'm not sure if this was an issue before, but since it hasn't been brought up I will assume this is a bug from that pull request of mine\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.urls import reverse\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Attendee, Event\n\n\ndef _get_attendee(attendee_id):\n try:\n return Attendee.objects.get(pk=attendee_id)\n except Attendee.DoesNotExist:\n return None\n\n\ndef event_ajax_handler(event: Event, request):\n action = request.POST.get('action')\n administrating_user = request.user\n attendee_id = request.POST.get('attendee_id')\n user_id = request.POST.get('user_id')\n\n if action == 'attended':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_attended(attendee)\n elif action == 'paid':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_paid(attendee)\n elif action == 'add_attendee':\n return handle_add_attendee(event, user_id)\n elif action == 'remove_attendee':\n return handle_remove_attendee(event, attendee_id, administrating_user)\n else:\n raise NotImplementedError\n\n\ndef handle_attended(attendee: Attendee):\n \"\"\"\n Toggle attending-status of an attendee between attending and not attending\n \"\"\"\n attendee.attended = not attendee.attended\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_paid(attendee: Attendee):\n \"\"\"\n Toggle paid status of an attendee between paid and not paid\n \"\"\"\n attendee.paid = not attendee.paid\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef _get_attendee_data(attendee_qs):\n attendees = []\n\n for number, a in enumerate(attendee_qs):\n attendees.append({\n 'number': number + 1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n\n return attendees\n\n\ndef _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n\n return response\n\n\ndef handle_add_attendee(event: Event, user_id: int):\n resp = _get_event_context(event)\n if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:\n if not event.attendance_event.waitlist:\n return {'message': f'Det er ingen ledige plasser p\u00e5 {event.title}.', 'status': 400, **resp}\n\n user = User.objects.filter(pk=user_id)\n if user.count() != 1:\n return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}\n user = user[0]\n if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:\n return {'message': f'{user} er allerede p\u00e5meldt {event.title}.', 'status': 400, **resp}\n\n attendee = Attendee(user=user, event=event.attendance_event)\n attendee.save()\n\n resp = _get_event_context(event, resp)\n return {'message': f'{user} ble meldt p\u00e5 {event}', 'status': 200, **resp}\n\n\ndef handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):\n resp = _get_event_context(event)\n attendee = Attendee.objects.filter(pk=attendee_id)\n if attendee.count() != 1:\n 
return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}\n attendee = attendee[0]\n attendee.unattend(admin_user)\n\n resp = _get_event_context(event, resp)\n return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}\n", "path": "apps/events/dashboard/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.urls import reverse\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Attendee, Event\n\n\ndef _get_attendee(attendee_id):\n try:\n return Attendee.objects.get(pk=attendee_id)\n except Attendee.DoesNotExist:\n return None\n\n\ndef event_ajax_handler(event: Event, request):\n action = request.POST.get('action')\n administrating_user = request.user\n attendee_id = request.POST.get('attendee_id')\n user_id = request.POST.get('user_id')\n\n if action == 'attended':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_attended(attendee)\n elif action == 'paid':\n attendee = _get_attendee(attendee_id)\n if not attendee:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400}\n return handle_paid(attendee)\n elif action == 'add_attendee':\n return handle_add_attendee(event, user_id)\n elif action == 'remove_attendee':\n return handle_remove_attendee(event, attendee_id, administrating_user)\n else:\n raise NotImplementedError\n\n\ndef handle_attended(attendee: Attendee):\n \"\"\"\n Toggle attending-status of an attendee between attending and not attending\n \"\"\"\n attendee.attended = not attendee.attended\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_paid(attendee: Attendee):\n \"\"\"\n Toggle paid status of an attendee between paid and not paid\n \"\"\"\n attendee.paid = not attendee.paid\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef _get_attendee_data(attendee_qs):\n attendees = []\n\n for number, a in enumerate(attendee_qs):\n attendees.append({\n 'number': number + 1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'year_of_study': a.user.year,\n 'paid': a.paid,\n 'payment_deadline': a.get_payment_deadline(),\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n\n return attendees\n\n\ndef _get_event_context(event: Event, response={}):\n response['attendees'] = _get_attendee_data(event.attendance_event.attending_attendees_qs)\n response['waitlist'] = _get_attendee_data(event.attendance_event.waitlist_qs)\n response['is_payment_event'] = bool(event.attendance_event.payment())\n response['has_extras'] = event.attendance_event.has_extras\n\n return response\n\n\ndef handle_add_attendee(event: Event, user_id: int):\n resp = _get_event_context(event)\n if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:\n if not event.attendance_event.waitlist:\n return {'message': f'Det er ingen ledige plasser p\u00e5 {event.title}.', 'status': 400, **resp}\n\n user = User.objects.filter(pk=user_id)\n if user.count() != 1:\n return {'message': f'Fant ingen bruker med oppgitt ID ({user_id}).', 'status': 400, **resp}\n user = user[0]\n if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:\n return {'message': f'{user} er allerede p\u00e5meldt {event.title}.', 'status': 
400, **resp}\n\n attendee = Attendee(user=user, event=event.attendance_event)\n attendee.save()\n\n resp = _get_event_context(event, resp)\n return {'message': f'{user} ble meldt p\u00e5 {event}', 'status': 200, **resp}\n\n\ndef handle_remove_attendee(event: Event, attendee_id: int, admin_user: User):\n resp = _get_event_context(event)\n attendee = Attendee.objects.filter(pk=attendee_id)\n if attendee.count() != 1:\n return {'message': f'Fant ingen p\u00e5meldte med oppgitt ID ({attendee_id}).', 'status': 400, **resp}\n attendee = attendee[0]\n attendee.unattend(admin_user)\n\n resp = _get_event_context(event, resp)\n return {'message': f'{attendee.user} ble fjernet fra {attendee.event}', 'status': 200, **resp}\n", "path": "apps/events/dashboard/utils.py"}]} | 2,006 | 247 |
gh_patches_debug_34954 | rasdani/github-patches | git_diff | deepchecks__deepchecks-807 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Calling `save_as_html` can raise an error if the label column is imbalanced and has the type `np.bool_`
**Describe the bug**
The error comes from this chart (section "Train Test Label Drift"):

This bar chart is created whenever the label is imbalanced and drifts between train and test (which is the case with my data).
The x axis has the values `False` and `True` with the dtype `np.bool_`. Before saving as HTML, somewhere in the code, this diagram is encoded as JSON. The call to `json.dumps` causes the error. The solution is to convert the problematic data to the regular Python `bool` type.
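As a standalone illustration (independent of deepchecks internals), `json.dumps` rejects `np.bool_` values but accepts them once cast to the built-in `bool`:

```python
# Minimal demonstration of the failure and the cast that avoids it.
import json

import numpy as np

legend_values = [np.bool_(False), np.bool_(True)]

try:
    json.dumps(legend_values)
except TypeError as err:
    print(err)  # Object of type bool_ is not JSON serializable

print(json.dumps([bool(v) for v in legend_values]))  # [false, true]
```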
**To Reproduce**
Steps to reproduce the behavior:
1. Run the code below
2. See error: "TypeError: Object of type bool_ is not JSON serializable"
```python
import pandas as pd
import numpy as np
from deepchecks import Dataset
from deepchecks.suites import train_test_validation
if __name__ == "__main__":
# Generate some random data
# Note that the data type of the generated label is numpy.bool_
# (in my project, the label column has this type)
np.random.seed(42)
training_size = 1000
training_positive_proportion = 0.01 # Very low proportion to simulate an imbalanced dataset
testing_size = 100
testing_positive_proportion = 0.05 # This proportion is different from training to simulate a drift on the label
training_df = pd.DataFrame({
"feature_1": np.ones(training_size),
"label": np.random.choice(a=[False, True], size=(training_size,), p=[training_positive_proportion, 1 - training_positive_proportion])
})
testing_df = pd.DataFrame({
"feature_1": np.ones(testing_size),
"label": np.random.choice(a=[False, True], size=(testing_size,), p=[testing_positive_proportion, 1 - testing_positive_proportion])
})
# Run the train_test_validation suite
training_ds = Dataset(df=training_df, label="label", features=["feature_1"])
testing_ds = Dataset(df=testing_df, label="label", features=["feature_1"])
suite = train_test_validation()
results = suite.run(train_dataset=training_ds, test_dataset=testing_ds)
# Save as HTML, this will raise the following error:
# "TypeError: Object of type bool_ is not JSON serializable"
results.save_as_html("output.html")
```
**Expected behavior**
I would expect deepchecks to deal with this internally by converting the legend values to `bool`.
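One common way for a library to handle this internally is a `json.JSONEncoder` subclass that coerces numpy scalars before encoding; the snippet below is a generic sketch of that pattern, not deepchecks' actual code:

```python
# Generic sketch of a numpy-aware JSON encoder.
import json

import numpy as np


class NumpyEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, np.bool_):
            return bool(o)
        if isinstance(o, np.integer):
            return int(o)
        if isinstance(o, np.floating):
            return float(o)
        if isinstance(o, np.ndarray):
            return o.tolist()
        return super().default(o)


print(json.dumps({"label": np.bool_(True)}, cls=NumpyEncoder))  # {"label": true}
```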
**Environment (please complete the following information):**
- OS: Ubuntu 20.04 (running on Windows 10 with WSL)
- Python Version: 3.8.10
- Deepchecks Version: 0.4.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/utils/distribution/plot.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """A module containing utils for plotting distributions."""
12 import numpy as np
13 import pandas as pd
14 from scipy.stats import gaussian_kde
15 import plotly.graph_objs as go
16
17 __all__ = ['feature_distribution_traces', 'drift_score_bar_traces', 'get_density']
18
19 from typing import List, Union, Dict, Tuple
20
21 from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins
22 from deepchecks.utils.plot import colors
23
24
25 def get_density(data, xs) -> np.ndarray:
26 """Get gaussian kde density to plot.
27
28 Parameters
29 ----------
30 data
31 The data used to compute the pdf function.
32 xs : iterable
33 List of x values to plot the computed pdf for.
34 Returns
35 -------
36 np.array
37 The computed pdf values at the points xs.
38 """
39 # Is only single value adds noise, otherwise there is singular matrix error
40 if len(np.unique(data)) == 1:
41 data = data + np.random.normal(scale=10 * np.finfo(np.float32).eps, size=len(data))
42 density = gaussian_kde(data)
43 density.covariance_factor = lambda: .25
44 # pylint: disable=protected-access
45 density._compute_covariance()
46 return density(xs)
47
48
49 def drift_score_bar_traces(drift_score: float, bar_max: float = None) -> Tuple[List[go.Bar], Dict, Dict]:
50 """Create a traffic light bar traces for drift score.
51
52 Parameters
53 ----------
54 drift_score : float
55 Drift score
56 bar_max : float , default: None
57 Maximum value for the bar
58 Returns
59 -------
60 Tuple[List[go.Bar], Dict, Dict]
61 list of plotly bar traces.
62 """
63 traffic_light_colors = [((0, 0.1), '#01B8AA'),
64 ((0.1, 0.2), '#F2C80F'),
65 ((0.2, 0.3), '#FE9666'),
66 ((0.3, 1), '#FD625E')
67 ]
68
69 bars = []
70
71 for range_tuple, color in traffic_light_colors:
72 if drift_score < range_tuple[0]:
73 break
74
75 bars.append(go.Bar(
76 x=[min(drift_score, range_tuple[1]) - range_tuple[0]],
77 y=['Drift Score'],
78 orientation='h',
79 marker=dict(
80 color=color,
81 ),
82 offsetgroup=0,
83 base=range_tuple[0],
84 showlegend=False
85
86 ))
87
88 bar_stop = max(0.4, drift_score + 0.1)
89 if bar_max:
90 bar_stop = min(bar_stop, bar_max)
91 xaxis = dict(
92 showgrid=False,
93 gridcolor='black',
94 linecolor='black',
95 range=[0, bar_stop],
96 dtick=0.05,
97 fixedrange=True
98 )
99 yaxis = dict(
100 showgrid=False,
101 showline=False,
102 showticklabels=False,
103 zeroline=False,
104 color='black',
105 fixedrange=True
106 )
107
108 return bars, xaxis, yaxis
109
110
111 def feature_distribution_traces(train_column,
112 test_column,
113 is_categorical: bool = False,
114 max_num_categories: int = 10,
115 quantile_cut: float = 0.02) -> Tuple[List[Union[go.Bar, go.Scatter]], Dict, Dict]:
116 """Create traces for comparison between train and test column.
117
118 Parameters
119 ----------
120 train_column
121 Train data used to trace distribution.
122 test_column
123 Test data used to trace distribution.
124 is_categorical : bool , default: False
125 State if column is categorical.
126 max_num_categories : int , default: 10
127 Maximum number of categories to show in plot (default: 10).
128 quantile_cut : float , default: 0.02
129 in which quantile to cut the edges of the plot
130 Returns
131 -------
132 List[Union[go.Bar, go.Scatter]]
133 list of plotly traces.
134 Dict
135 layout of x axis
136 Dict
137 layout of y axis
138 """
139 if is_categorical:
140 expected_percents, actual_percents, categories_list = \
141 preprocess_2_cat_cols_to_same_bins(dist1=train_column, dist2=test_column,
142 max_num_categories=max_num_categories)
143 cat_df = pd.DataFrame({'Train dataset': expected_percents, 'Test dataset': actual_percents},
144 index=categories_list)
145 train_bar = go.Bar(
146 x=cat_df.index,
147 y=cat_df['Train dataset'],
148 marker=dict(
149 color=colors['Train'],
150 ),
151 name='Train Dataset',
152 )
153
154 test_bar = go.Bar(
155 x=cat_df.index,
156 y=cat_df['Test dataset'],
157 marker=dict(
158 color=colors['Test'],
159 ),
160 name='Test Dataset',
161 )
162
163 traces = [train_bar, test_bar]
164
165 xaxis_layout = dict(type='category')
166 yaxis_layout = dict(fixedrange=True,
167 range=(0, 1),
168 title='Percentage')
169
170 else:
171 x_range = (min(train_column.min(), test_column.min()), max(train_column.max(), test_column.max()))
172 x_range_to_show = (
173 min(np.quantile(train_column, quantile_cut), np.quantile(test_column, quantile_cut)),
174 max(np.quantile(train_column, 1 - quantile_cut), np.quantile(test_column, 1 - quantile_cut))
175 )
176 # Heuristically take points on x-axis to show on the plot
177 # The intuition is the graph will look "smooth" wherever we will zoom it
178 xs = sorted(np.concatenate((
179 np.linspace(x_range[0], x_range[1], 50),
180 np.quantile(train_column, q=np.arange(0.02, 1, 0.02)),
181 np.quantile(test_column, q=np.arange(0.02, 1, 0.02))
182 )))
183
184 traces = [go.Scatter(x=xs, y=get_density(train_column, xs), fill='tozeroy', name='Train Dataset',
185 line_color=colors['Train']),
186 go.Scatter(x=xs, y=get_density(test_column, xs), fill='tozeroy', name='Test Dataset',
187 line_color=colors['Test'])]
188
189 xaxis_layout = dict(fixedrange=False,
190 range=x_range_to_show,
191 title='Distribution')
192 yaxis_layout = dict(title='Probability Density', fixedrange=True)
193
194 return traces, xaxis_layout, yaxis_layout
195
```
Path: `deepchecks/utils/dataframes.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Contain functions for handling dataframes in checks."""
12 import typing as t
13 import pandas as pd
14 from deepchecks.utils.typing import Hashable
15 from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence
16 from deepchecks.core.errors import DeepchecksValueError
17
18
19 __all__ = ['validate_columns_exist', 'select_from_dataframe']
20
21
22 def validate_columns_exist(
23 df: pd.DataFrame,
24 columns: t.Union[Hashable, t.List[Hashable]],
25 raise_error: bool = True
26 ) -> bool:
27 """Validate given columns exist in dataframe.
28
29 Parameters
30 ----------
31 df : pd.DataFrame
32 dataframe to inspect
33 columns : t.Union[Hashable, t.List[Hashable]]
34 Column names to check
35 raise_error : bool, default: True
36 whether to raise an error if some column is not present in the dataframe or not
37
38 Raises
39 ------
40 DeepchecksValueError
41 If some of the columns do not exist within provided dataframe.
42 If receives empty list of 'columns'.
43 If not all elements within 'columns' list are hashable.
44 """
45 error_message = 'columns - expected to receive not empty list of hashable values!'
46 columns = ensure_hashable_or_mutable_sequence(columns, message=error_message)
47
48 is_empty = len(columns) == 0
49
50 if raise_error and is_empty:
51 raise DeepchecksValueError(error_message)
52 elif not raise_error and is_empty:
53 return False
54
55 difference = set(columns) - set(df.columns)
56 all_columns_present = len(difference) == 0
57
58 if raise_error and not all_columns_present:
59 stringified_columns = ','.join(map(str, difference))
60 raise DeepchecksValueError(f'Given columns do not exist in dataset: {stringified_columns}')
61
62 return all_columns_present
63
64
65 def select_from_dataframe(
66 df: pd.DataFrame,
67 columns: t.Union[Hashable, t.List[Hashable], None] = None,
68 ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None
69 ) -> pd.DataFrame:
70 """Filter DataFrame columns by given params.
71
72 Parameters
73 ----------
74 df : pd.DataFrame
75 columns : t.Union[Hashable, t.List[Hashable]] , default: None
76 Column names to keep.
77 ignore_columns : t.Union[Hashable, t.List[Hashable]] , default: None
78 Column names to drop.
79
80 Returns
81 -------
82 pandas.DataFrame
83 returns horizontally filtered dataframe
84
85 Raises
86 ------
87 DeepchecksValueError
88 If some columns do not exist within provided dataframe;
89 If 'columns' and 'ignore_columns' arguments is 'None'.
90 """
91 if columns is not None and ignore_columns is not None:
92 raise DeepchecksValueError(
93 'Cannot receive both parameters "columns" and "ignore", '
94 'only one must be used at most'
95 )
96 elif columns is not None:
97 columns = ensure_hashable_or_mutable_sequence(columns)
98 validate_columns_exist(df, columns)
99 return t.cast(pd.DataFrame, df[columns])
100 elif ignore_columns is not None:
101 ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)
102 validate_columns_exist(df, ignore_columns)
103 return df.drop(labels=ignore_columns, axis='columns')
104 else:
105 return df
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/utils/dataframes.py b/deepchecks/utils/dataframes.py
--- a/deepchecks/utils/dataframes.py
+++ b/deepchecks/utils/dataframes.py
@@ -10,13 +10,37 @@
#
"""Contain functions for handling dataframes in checks."""
import typing as t
+
import pandas as pd
+import numpy as np
+
from deepchecks.utils.typing import Hashable
from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence
from deepchecks.core.errors import DeepchecksValueError
-__all__ = ['validate_columns_exist', 'select_from_dataframe']
+__all__ = ['validate_columns_exist', 'select_from_dataframe', 'un_numpy']
+
+
+def un_numpy(val):
+ """Convert numpy value to native value.
+
+ Parameters
+ ----------
+ val :
+ The value to convert.
+
+ Returns
+ -------
+ returns the numpy value in a native type.
+ """
+ if isinstance(val, np.bool_):
+ return str(val)
+ if isinstance(val, (np.float64, np.float_)):
+ if np.isnan(val):
+ return None
+ return float(val)
+ return val
def validate_columns_exist(
diff --git a/deepchecks/utils/distribution/plot.py b/deepchecks/utils/distribution/plot.py
--- a/deepchecks/utils/distribution/plot.py
+++ b/deepchecks/utils/distribution/plot.py
@@ -20,6 +20,7 @@
from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins
from deepchecks.utils.plot import colors
+from deepchecks.utils.dataframes import un_numpy
def get_density(data, xs) -> np.ndarray:
@@ -140,6 +141,9 @@
expected_percents, actual_percents, categories_list = \
preprocess_2_cat_cols_to_same_bins(dist1=train_column, dist2=test_column,
max_num_categories=max_num_categories)
+ # fixes plotly widget bug with numpy values by converting them to native values
+ # https://github.com/plotly/plotly.py/issues/3470
+ categories_list = [un_numpy(cat) for cat in categories_list]
cat_df = pd.DataFrame({'Train dataset': expected_percents, 'Test dataset': actual_percents},
index=categories_list)
train_bar = go.Bar(
| {"golden_diff": "diff --git a/deepchecks/utils/dataframes.py b/deepchecks/utils/dataframes.py\n--- a/deepchecks/utils/dataframes.py\n+++ b/deepchecks/utils/dataframes.py\n@@ -10,13 +10,37 @@\n #\n \"\"\"Contain functions for handling dataframes in checks.\"\"\"\n import typing as t\n+\n import pandas as pd\n+import numpy as np\n+\n from deepchecks.utils.typing import Hashable\n from deepchecks.utils.validation import ensure_hashable_or_mutable_sequence\n from deepchecks.core.errors import DeepchecksValueError\n \n \n-__all__ = ['validate_columns_exist', 'select_from_dataframe']\n+__all__ = ['validate_columns_exist', 'select_from_dataframe', 'un_numpy']\n+\n+\n+def un_numpy(val):\n+ \"\"\"Convert numpy value to native value.\n+\n+ Parameters\n+ ----------\n+ val :\n+ The value to convert.\n+\n+ Returns\n+ -------\n+ returns the numpy value in a native type.\n+ \"\"\"\n+ if isinstance(val, np.bool_):\n+ return str(val)\n+ if isinstance(val, (np.float64, np.float_)):\n+ if np.isnan(val):\n+ return None\n+ return float(val)\n+ return val\n \n \n def validate_columns_exist(\ndiff --git a/deepchecks/utils/distribution/plot.py b/deepchecks/utils/distribution/plot.py\n--- a/deepchecks/utils/distribution/plot.py\n+++ b/deepchecks/utils/distribution/plot.py\n@@ -20,6 +20,7 @@\n \n from deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\n from deepchecks.utils.plot import colors\n+from deepchecks.utils.dataframes import un_numpy\n \n \n def get_density(data, xs) -> np.ndarray:\n@@ -140,6 +141,9 @@\n expected_percents, actual_percents, categories_list = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_column, dist2=test_column,\n max_num_categories=max_num_categories)\n+ # fixes plotly widget bug with numpy values by converting them to native values\n+ # https://github.com/plotly/plotly.py/issues/3470\n+ categories_list = [un_numpy(cat) for cat in categories_list]\n cat_df = pd.DataFrame({'Train dataset': expected_percents, 'Test dataset': actual_percents},\n index=categories_list)\n train_bar = go.Bar(\n", "issue": "[BUG] Calling `save_as_html` can raise an error if the label column is imbalanced and has the type `np.bool_`\n**Describe the bug**\r\nThe error comes from this chart (section \"Train Test Label Drift\"):\r\n\r\n\r\nThis bar chart is created whenever the label is imbalanced and drifts between train and test (which is the case with my data).\r\n\r\nThe x axis has the values `False` and `True` with the dtype `np.bool_`. Before saving as HTML, somewhere in the code, this diagram is encoded as JSON. The call to `json.dumps` causes the error. The solution is to convert the problematic data to the regular Python `bool` type.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Run the code below\r\n2. 
See error: \"TypeError: Object of type bool_ is not JSON serializable\"\r\n\r\n```python\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nfrom deepchecks import Dataset\r\nfrom deepchecks.suites import train_test_validation\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Generate some random data\r\n # Note that the data type of the generated label is numpy.bool_\r\n # (in my project, the label column has this type)\r\n np.random.seed(42)\r\n training_size = 1000\r\n training_positive_proportion = 0.01 # Very low proportion to simulate an imbalanced dataset\r\n testing_size = 100\r\n testing_positive_proportion = 0.05 # This proportion is different from training to simulate a drift on the label\r\n training_df = pd.DataFrame({\r\n \"feature_1\": np.ones(training_size),\r\n \"label\": np.random.choice(a=[False, True], size=(training_size,), p=[training_positive_proportion, 1 - training_positive_proportion])\r\n })\r\n testing_df = pd.DataFrame({\r\n \"feature_1\": np.ones(testing_size),\r\n \"label\": np.random.choice(a=[False, True], size=(testing_size,), p=[testing_positive_proportion, 1 - testing_positive_proportion])\r\n })\r\n\r\n # Run the train_test_validation suite\r\n training_ds = Dataset(df=training_df, label=\"label\", features=[\"feature_1\"])\r\n testing_ds = Dataset(df=testing_df, label=\"label\", features=[\"feature_1\"])\r\n suite = train_test_validation()\r\n results = suite.run(train_dataset=training_ds, test_dataset=testing_ds)\r\n\r\n # Save as HTML, this will raise the following error:\r\n # \"TypeError: Object of type bool_ is not JSON serializable\"\r\n results.save_as_html(\"output.html\")\r\n```\r\n\r\n**Expected behavior**\r\nI would expect deepchecks to deal with this internally by converting the legend values to `bool`.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Ubuntu 20.04 (running on Windows 10 with WSL)\r\n - Python Version: 3.8.10\r\n - Deepchecks Version: 0.4.1\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"A module containing utils for plotting distributions.\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gaussian_kde\nimport plotly.graph_objs as go\n\n__all__ = ['feature_distribution_traces', 'drift_score_bar_traces', 'get_density']\n\nfrom typing import List, Union, Dict, Tuple\n\nfrom deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\nfrom deepchecks.utils.plot import colors\n\n\ndef get_density(data, xs) -> np.ndarray:\n \"\"\"Get gaussian kde density to plot.\n\n Parameters\n ----------\n data\n The data used to compute the pdf function.\n xs : iterable\n List of x values to plot the computed pdf for.\n Returns\n -------\n np.array\n The computed pdf values at the points xs.\n \"\"\"\n # Is only single value adds noise, otherwise there is singular matrix error\n if len(np.unique(data)) == 1:\n data = data + np.random.normal(scale=10 * np.finfo(np.float32).eps, size=len(data))\n density = gaussian_kde(data)\n density.covariance_factor = lambda: .25\n # pylint: disable=protected-access\n density._compute_covariance()\n return density(xs)\n\n\ndef drift_score_bar_traces(drift_score: float, bar_max: float = None) -> Tuple[List[go.Bar], Dict, Dict]:\n \"\"\"Create a traffic light bar traces for drift score.\n\n Parameters\n ----------\n drift_score : float\n Drift score\n bar_max : float , default: None\n Maximum value for the bar\n Returns\n -------\n Tuple[List[go.Bar], Dict, Dict]\n list of plotly bar traces.\n \"\"\"\n traffic_light_colors = [((0, 0.1), '#01B8AA'),\n ((0.1, 0.2), '#F2C80F'),\n ((0.2, 0.3), '#FE9666'),\n ((0.3, 1), '#FD625E')\n ]\n\n bars = []\n\n for range_tuple, color in traffic_light_colors:\n if drift_score < range_tuple[0]:\n break\n\n bars.append(go.Bar(\n x=[min(drift_score, range_tuple[1]) - range_tuple[0]],\n y=['Drift Score'],\n orientation='h',\n marker=dict(\n color=color,\n ),\n offsetgroup=0,\n base=range_tuple[0],\n showlegend=False\n\n ))\n\n bar_stop = max(0.4, drift_score + 0.1)\n if bar_max:\n bar_stop = min(bar_stop, bar_max)\n xaxis = dict(\n showgrid=False,\n gridcolor='black',\n linecolor='black',\n range=[0, bar_stop],\n dtick=0.05,\n fixedrange=True\n )\n yaxis = dict(\n showgrid=False,\n showline=False,\n showticklabels=False,\n zeroline=False,\n color='black',\n fixedrange=True\n )\n\n return bars, xaxis, yaxis\n\n\ndef feature_distribution_traces(train_column,\n test_column,\n is_categorical: bool = False,\n max_num_categories: int = 10,\n quantile_cut: float = 0.02) -> Tuple[List[Union[go.Bar, go.Scatter]], Dict, Dict]:\n \"\"\"Create traces for comparison between train and test column.\n\n Parameters\n ----------\n train_column\n Train data used to trace distribution.\n test_column\n Test data used to trace distribution.\n is_categorical : bool , default: False\n State if column is categorical.\n max_num_categories : int , default: 10\n Maximum number of categories to show in plot (default: 10).\n quantile_cut : float , default: 0.02\n in which quantile to cut the edges of the plot\n Returns\n -------\n List[Union[go.Bar, go.Scatter]]\n list of plotly traces.\n Dict\n layout of x axis\n Dict\n layout of y axis\n \"\"\"\n if is_categorical:\n expected_percents, actual_percents, categories_list = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_column, dist2=test_column,\n max_num_categories=max_num_categories)\n cat_df = pd.DataFrame({'Train 
dataset': expected_percents, 'Test dataset': actual_percents},\n index=categories_list)\n train_bar = go.Bar(\n x=cat_df.index,\n y=cat_df['Train dataset'],\n marker=dict(\n color=colors['Train'],\n ),\n name='Train Dataset',\n )\n\n test_bar = go.Bar(\n x=cat_df.index,\n y=cat_df['Test dataset'],\n marker=dict(\n color=colors['Test'],\n ),\n name='Test Dataset',\n )\n\n traces = [train_bar, test_bar]\n\n xaxis_layout = dict(type='category')\n yaxis_layout = dict(fixedrange=True,\n range=(0, 1),\n title='Percentage')\n\n else:\n x_range = (min(train_column.min(), test_column.min()), max(train_column.max(), test_column.max()))\n x_range_to_show = (\n min(np.quantile(train_column, quantile_cut), np.quantile(test_column, quantile_cut)),\n max(np.quantile(train_column, 1 - quantile_cut), np.quantile(test_column, 1 - quantile_cut))\n )\n # Heuristically take points on x-axis to show on the plot\n # The intuition is the graph will look \"smooth\" wherever we will zoom it\n xs = sorted(np.concatenate((\n np.linspace(x_range[0], x_range[1], 50),\n np.quantile(train_column, q=np.arange(0.02, 1, 0.02)),\n np.quantile(test_column, q=np.arange(0.02, 1, 0.02))\n )))\n\n traces = [go.Scatter(x=xs, y=get_density(train_column, xs), fill='tozeroy', name='Train Dataset',\n line_color=colors['Train']),\n go.Scatter(x=xs, y=get_density(test_column, xs), fill='tozeroy', name='Test Dataset',\n line_color=colors['Test'])]\n\n xaxis_layout = dict(fixedrange=False,\n range=x_range_to_show,\n title='Distribution')\n yaxis_layout = dict(title='Probability Density', fixedrange=True)\n\n return traces, xaxis_layout, yaxis_layout\n", "path": "deepchecks/utils/distribution/plot.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Contain functions for handling dataframes in checks.\"\"\"\nimport typing as t\nimport pandas as pd\nfrom deepchecks.utils.typing import Hashable\nfrom deepchecks.utils.validation import ensure_hashable_or_mutable_sequence\nfrom deepchecks.core.errors import DeepchecksValueError\n\n\n__all__ = ['validate_columns_exist', 'select_from_dataframe']\n\n\ndef validate_columns_exist(\n df: pd.DataFrame,\n columns: t.Union[Hashable, t.List[Hashable]],\n raise_error: bool = True\n) -> bool:\n \"\"\"Validate given columns exist in dataframe.\n\n Parameters\n ----------\n df : pd.DataFrame\n dataframe to inspect\n columns : t.Union[Hashable, t.List[Hashable]]\n Column names to check\n raise_error : bool, default: True\n whether to raise an error if some column is not present in the dataframe or not\n\n Raises\n ------\n DeepchecksValueError\n If some of the columns do not exist within provided dataframe.\n If receives empty list of 'columns'.\n If not all elements within 'columns' list are hashable.\n \"\"\"\n error_message = 'columns - expected to receive not empty list of hashable values!'\n columns = ensure_hashable_or_mutable_sequence(columns, message=error_message)\n\n is_empty = len(columns) == 0\n\n if raise_error and is_empty:\n raise DeepchecksValueError(error_message)\n elif not raise_error and is_empty:\n return False\n\n difference = set(columns) - set(df.columns)\n all_columns_present = len(difference) == 0\n\n if raise_error and not all_columns_present:\n stringified_columns = ','.join(map(str, difference))\n raise DeepchecksValueError(f'Given columns do not exist in dataset: {stringified_columns}')\n\n return all_columns_present\n\n\ndef select_from_dataframe(\n df: pd.DataFrame,\n columns: t.Union[Hashable, t.List[Hashable], None] = None,\n ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None\n) -> pd.DataFrame:\n \"\"\"Filter DataFrame columns by given params.\n\n Parameters\n ----------\n df : pd.DataFrame\n columns : t.Union[Hashable, t.List[Hashable]] , default: None\n Column names to keep.\n ignore_columns : t.Union[Hashable, t.List[Hashable]] , default: None\n Column names to drop.\n\n Returns\n -------\n pandas.DataFrame\n returns horizontally filtered dataframe\n\n Raises\n ------\n DeepchecksValueError\n If some columns do not exist within provided dataframe;\n If 'columns' and 'ignore_columns' arguments is 'None'.\n \"\"\"\n if columns is not None and ignore_columns is not None:\n raise DeepchecksValueError(\n 'Cannot receive both parameters \"columns\" and \"ignore\", '\n 'only one must be used at most'\n )\n elif columns is not None:\n columns = ensure_hashable_or_mutable_sequence(columns)\n validate_columns_exist(df, columns)\n return t.cast(pd.DataFrame, df[columns])\n elif ignore_columns is not None:\n ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)\n validate_columns_exist(df, ignore_columns)\n return df.drop(labels=ignore_columns, axis='columns')\n else:\n return df\n", "path": "deepchecks/utils/dataframes.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public 
License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"A module containing utils for plotting distributions.\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gaussian_kde\nimport plotly.graph_objs as go\n\n__all__ = ['feature_distribution_traces', 'drift_score_bar_traces', 'get_density']\n\nfrom typing import List, Union, Dict, Tuple\n\nfrom deepchecks.utils.distribution.preprocessing import preprocess_2_cat_cols_to_same_bins\nfrom deepchecks.utils.plot import colors\nfrom deepchecks.utils.dataframes import un_numpy\n\n\ndef get_density(data, xs) -> np.ndarray:\n \"\"\"Get gaussian kde density to plot.\n\n Parameters\n ----------\n data\n The data used to compute the pdf function.\n xs : iterable\n List of x values to plot the computed pdf for.\n Returns\n -------\n np.array\n The computed pdf values at the points xs.\n \"\"\"\n # Is only single value adds noise, otherwise there is singular matrix error\n if len(np.unique(data)) == 1:\n data = data + np.random.normal(scale=10 * np.finfo(np.float32).eps, size=len(data))\n density = gaussian_kde(data)\n density.covariance_factor = lambda: .25\n # pylint: disable=protected-access\n density._compute_covariance()\n return density(xs)\n\n\ndef drift_score_bar_traces(drift_score: float, bar_max: float = None) -> Tuple[List[go.Bar], Dict, Dict]:\n \"\"\"Create a traffic light bar traces for drift score.\n\n Parameters\n ----------\n drift_score : float\n Drift score\n bar_max : float , default: None\n Maximum value for the bar\n Returns\n -------\n Tuple[List[go.Bar], Dict, Dict]\n list of plotly bar traces.\n \"\"\"\n traffic_light_colors = [((0, 0.1), '#01B8AA'),\n ((0.1, 0.2), '#F2C80F'),\n ((0.2, 0.3), '#FE9666'),\n ((0.3, 1), '#FD625E')\n ]\n\n bars = []\n\n for range_tuple, color in traffic_light_colors:\n if drift_score < range_tuple[0]:\n break\n\n bars.append(go.Bar(\n x=[min(drift_score, range_tuple[1]) - range_tuple[0]],\n y=['Drift Score'],\n orientation='h',\n marker=dict(\n color=color,\n ),\n offsetgroup=0,\n base=range_tuple[0],\n showlegend=False\n\n ))\n\n bar_stop = max(0.4, drift_score + 0.1)\n if bar_max:\n bar_stop = min(bar_stop, bar_max)\n xaxis = dict(\n showgrid=False,\n gridcolor='black',\n linecolor='black',\n range=[0, bar_stop],\n dtick=0.05,\n fixedrange=True\n )\n yaxis = dict(\n showgrid=False,\n showline=False,\n showticklabels=False,\n zeroline=False,\n color='black',\n fixedrange=True\n )\n\n return bars, xaxis, yaxis\n\n\ndef feature_distribution_traces(train_column,\n test_column,\n is_categorical: bool = False,\n max_num_categories: int = 10,\n quantile_cut: float = 0.02) -> Tuple[List[Union[go.Bar, go.Scatter]], Dict, Dict]:\n \"\"\"Create traces for comparison between train and test column.\n\n Parameters\n ----------\n train_column\n Train data used to trace distribution.\n test_column\n Test data used to trace distribution.\n is_categorical : bool , default: False\n State if column is categorical.\n max_num_categories : int , default: 10\n Maximum number of categories to show in plot (default: 10).\n quantile_cut : float , default: 0.02\n in which quantile to cut the edges of the plot\n Returns\n -------\n List[Union[go.Bar, go.Scatter]]\n list of plotly traces.\n Dict\n layout of x axis\n Dict\n layout of y axis\n \"\"\"\n if is_categorical:\n expected_percents, actual_percents, categories_list = \\\n preprocess_2_cat_cols_to_same_bins(dist1=train_column, 
dist2=test_column,\n max_num_categories=max_num_categories)\n # fixes plotly widget bug with numpy values by converting them to native values\n # https://github.com/plotly/plotly.py/issues/3470\n categories_list = [un_numpy(cat) for cat in categories_list]\n cat_df = pd.DataFrame({'Train dataset': expected_percents, 'Test dataset': actual_percents},\n index=categories_list)\n train_bar = go.Bar(\n x=cat_df.index,\n y=cat_df['Train dataset'],\n marker=dict(\n color=colors['Train'],\n ),\n name='Train Dataset',\n )\n\n test_bar = go.Bar(\n x=cat_df.index,\n y=cat_df['Test dataset'],\n marker=dict(\n color=colors['Test'],\n ),\n name='Test Dataset',\n )\n\n traces = [train_bar, test_bar]\n\n xaxis_layout = dict(type='category')\n yaxis_layout = dict(fixedrange=True,\n range=(0, 1),\n title='Percentage')\n\n else:\n x_range = (min(train_column.min(), test_column.min()), max(train_column.max(), test_column.max()))\n x_range_to_show = (\n min(np.quantile(train_column, quantile_cut), np.quantile(test_column, quantile_cut)),\n max(np.quantile(train_column, 1 - quantile_cut), np.quantile(test_column, 1 - quantile_cut))\n )\n # Heuristically take points on x-axis to show on the plot\n # The intuition is the graph will look \"smooth\" wherever we will zoom it\n xs = sorted(np.concatenate((\n np.linspace(x_range[0], x_range[1], 50),\n np.quantile(train_column, q=np.arange(0.02, 1, 0.02)),\n np.quantile(test_column, q=np.arange(0.02, 1, 0.02))\n )))\n\n traces = [go.Scatter(x=xs, y=get_density(train_column, xs), fill='tozeroy', name='Train Dataset',\n line_color=colors['Train']),\n go.Scatter(x=xs, y=get_density(test_column, xs), fill='tozeroy', name='Test Dataset',\n line_color=colors['Test'])]\n\n xaxis_layout = dict(fixedrange=False,\n range=x_range_to_show,\n title='Distribution')\n yaxis_layout = dict(title='Probability Density', fixedrange=True)\n\n return traces, xaxis_layout, yaxis_layout\n", "path": "deepchecks/utils/distribution/plot.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Contain functions for handling dataframes in checks.\"\"\"\nimport typing as t\n\nimport pandas as pd\nimport numpy as np\n\nfrom deepchecks.utils.typing import Hashable\nfrom deepchecks.utils.validation import ensure_hashable_or_mutable_sequence\nfrom deepchecks.core.errors import DeepchecksValueError\n\n\n__all__ = ['validate_columns_exist', 'select_from_dataframe', 'un_numpy']\n\n\ndef un_numpy(val):\n \"\"\"Convert numpy value to native value.\n\n Parameters\n ----------\n val :\n The value to convert.\n\n Returns\n -------\n returns the numpy value in a native type.\n \"\"\"\n if isinstance(val, np.bool_):\n return str(val)\n if isinstance(val, (np.float64, np.float_)):\n if np.isnan(val):\n return None\n return float(val)\n return val\n\n\ndef validate_columns_exist(\n df: pd.DataFrame,\n columns: t.Union[Hashable, t.List[Hashable]],\n raise_error: bool = True\n) -> bool:\n \"\"\"Validate given columns exist in dataframe.\n\n Parameters\n ----------\n df : pd.DataFrame\n dataframe to inspect\n columns : t.Union[Hashable, t.List[Hashable]]\n Column names to check\n raise_error : bool, default: True\n whether to raise an error if some column is not present in the dataframe or not\n\n Raises\n ------\n DeepchecksValueError\n If some of the columns do not exist within provided dataframe.\n If receives empty list of 'columns'.\n If not all elements within 'columns' list are hashable.\n \"\"\"\n error_message = 'columns - expected to receive not empty list of hashable values!'\n columns = ensure_hashable_or_mutable_sequence(columns, message=error_message)\n\n is_empty = len(columns) == 0\n\n if raise_error and is_empty:\n raise DeepchecksValueError(error_message)\n elif not raise_error and is_empty:\n return False\n\n difference = set(columns) - set(df.columns)\n all_columns_present = len(difference) == 0\n\n if raise_error and not all_columns_present:\n stringified_columns = ','.join(map(str, difference))\n raise DeepchecksValueError(f'Given columns do not exist in dataset: {stringified_columns}')\n\n return all_columns_present\n\n\ndef select_from_dataframe(\n df: pd.DataFrame,\n columns: t.Union[Hashable, t.List[Hashable], None] = None,\n ignore_columns: t.Union[Hashable, t.List[Hashable], None] = None\n) -> pd.DataFrame:\n \"\"\"Filter DataFrame columns by given params.\n\n Parameters\n ----------\n df : pd.DataFrame\n columns : t.Union[Hashable, t.List[Hashable]] , default: None\n Column names to keep.\n ignore_columns : t.Union[Hashable, t.List[Hashable]] , default: None\n Column names to drop.\n\n Returns\n -------\n pandas.DataFrame\n returns horizontally filtered dataframe\n\n Raises\n ------\n DeepchecksValueError\n If some columns do not exist within provided dataframe;\n If 'columns' and 'ignore_columns' arguments is 'None'.\n \"\"\"\n if columns is not None and ignore_columns is not None:\n raise DeepchecksValueError(\n 'Cannot receive both parameters \"columns\" and \"ignore\", '\n 'only one must be used at most'\n )\n elif columns is not None:\n columns = ensure_hashable_or_mutable_sequence(columns)\n validate_columns_exist(df, columns)\n return t.cast(pd.DataFrame, df[columns])\n elif ignore_columns is not None:\n ignore_columns = ensure_hashable_or_mutable_sequence(ignore_columns)\n validate_columns_exist(df, ignore_columns)\n return df.drop(labels=ignore_columns, axis='columns')\n else:\n return df\n", "path": 
"deepchecks/utils/dataframes.py"}]} | 4,018 | 526 |
gh_patches_debug_26033 | rasdani/github-patches | git_diff | cowrie__cowrie-1237 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ls -l` user/group names need justification
`ls -l` does not properly pad the user/group names
**To Reproduce**
Steps to reproduce the behaviour:
1. ssh into a cowrie instance
2. `ls -l` on a directory with more than one user/group
3. the user names and group names don't line up between files
**Expected behaviour**
Nice justified columns of user/group names
--- END ISSUE ---
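For context, the justified columns the reporter expects come from padding each user/group name to the width of the longest name in the listing before any line is printed. A minimal sketch of that idea, independent of the cowrie code below (the sample entries are made up):

```python
# Sample (permissions, owner, group, size, name) rows; illustrative only.
entries = [
    ("-rw-r--r--", "root", "root", 4096, "passwd"),
    ("-rw-------", "messagebus", "adm", 120, "daemon.log"),
]

owner_width = max(len(owner) for _, owner, _, _, _ in entries)
group_width = max(len(group) for _, _, group, _, _ in entries)
size_width = max(len(str(size)) for _, _, _, size, _ in entries)

for perms, owner, group, size, name in entries:
    print(f"{perms} 1 {owner.ljust(owner_width)} {group.ljust(group_width)} "
          f"{str(size).rjust(size_width)} {name}")
```

The key point is that the widths must be computed over the whole listing before formatting; the listing code below already does this for file sizes but never pads the user/group names.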
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/commands/ls.py`
Content:
```
1 # Copyright (c) 2009 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 from __future__ import absolute_import, division
5
6 import getopt
7 import os.path
8 import stat
9 import time
10
11 import cowrie.shell.fs as fs
12 from cowrie.shell.command import HoneyPotCommand
13 from cowrie.shell.pwd import Group, Passwd
14
15 commands = {}
16
17
18 class command_ls(HoneyPotCommand):
19
20 def uid2name(self, uid):
21 try:
22 return Passwd().getpwuid(uid)["pw_name"]
23 except Exception:
24 return str(uid)
25
26 def gid2name(self, gid):
27 try:
28 return Group().getgrgid(gid)["gr_name"]
29 except Exception:
30 return str(gid)
31
32 def call(self):
33 path = self.protocol.cwd
34 paths = []
35 self.showHidden = False
36 self.showDirectories = False
37 func = self.do_ls_normal
38
39 # Parse options or display no files
40 try:
41 opts, args = getopt.gnu_getopt(self.args, '1@ABCFGHLOPRSTUWabcdefghiklmnopqrstuvwx',
42 ['help', 'version', 'param'])
43 except getopt.GetoptError as err:
44 self.write("ls: {}\n".format(err))
45 self.write("Try 'ls --help' for more information.\n")
46 return
47
48 for x, a in opts:
49 if x in ('-l'):
50 func = self.do_ls_l
51 if x in ('-a'):
52 self.showHidden = True
53 if x in ('-d'):
54 self.showDirectories = True
55
56 for arg in args:
57 paths.append(self.protocol.fs.resolve_path(arg, self.protocol.cwd))
58
59 if not paths:
60 func(path)
61 else:
62 for path in paths:
63 func(path)
64
65 def get_dir_files(self, path):
66 try:
67 if self.protocol.fs.isdir(path) and not self.showDirectories:
68 files = self.protocol.fs.get_path(path)[:]
69 if self.showHidden:
70 dot = self.protocol.fs.getfile(path)[:]
71 dot[fs.A_NAME] = '.'
72 files.append(dot)
73 dotdot = self.protocol.fs.getfile(os.path.split(path)[0])[:]
74 if not dotdot:
75 dotdot = self.protocol.fs.getfile(path)[:]
76 dotdot[fs.A_NAME] = '..'
77 files.append(dotdot)
78 else:
79 files = [x for x in files if not x[fs.A_NAME].startswith('.')]
80 files.sort()
81 else:
82 files = (self.protocol.fs.getfile(path)[:],)
83 except Exception:
84 self.write(
85 'ls: cannot access %s: No such file or directory\n' % (path,))
86 return
87 return files
88
89 def do_ls_normal(self, path):
90 files = self.get_dir_files(path)
91
92 line = [x[fs.A_NAME] for x in files]
93 if not line:
94 return
95 count = 0
96 maxlen = max([len(x) for x in line])
97
98 try:
99 wincols = self.protocol.user.windowSize[1]
100 except AttributeError:
101 wincols = 80
102
103 perline = int(wincols / (maxlen + 1))
104 for f in line:
105 if count == perline:
106 count = 0
107 self.write('\n')
108 self.write(f.ljust(maxlen + 1))
109 count += 1
110 self.write('\n')
111
112 def do_ls_l(self, path):
113 files = self.get_dir_files(path)
114
115 largest = 0
116 if len(files):
117 largest = max([x[fs.A_SIZE] for x in files])
118
119 for file in files:
120 if file[fs.A_NAME].startswith('.') and not self.showHidden:
121 continue
122
123 perms = ['-'] * 10
124 if file[fs.A_MODE] & stat.S_IRUSR:
125 perms[1] = 'r'
126 if file[fs.A_MODE] & stat.S_IWUSR:
127 perms[2] = 'w'
128 if file[fs.A_MODE] & stat.S_IXUSR:
129 perms[3] = 'x'
130 if file[fs.A_MODE] & stat.S_ISUID:
131 perms[3] = 'S'
132 if file[fs.A_MODE] & stat.S_IXUSR and file[fs.A_MODE] & stat.S_ISUID:
133 perms[3] = 's'
134
135 if file[fs.A_MODE] & stat.S_IRGRP:
136 perms[4] = 'r'
137 if file[fs.A_MODE] & stat.S_IWGRP:
138 perms[5] = 'w'
139 if file[fs.A_MODE] & stat.S_IXGRP:
140 perms[6] = 'x'
141 if file[fs.A_MODE] & stat.S_ISGID:
142 perms[6] = 'S'
143 if file[fs.A_MODE] & stat.S_IXGRP and file[fs.A_MODE] & stat.S_ISGID:
144 perms[6] = 's'
145
146 if file[fs.A_MODE] & stat.S_IROTH:
147 perms[7] = 'r'
148 if file[fs.A_MODE] & stat.S_IWOTH:
149 perms[8] = 'w'
150 if file[fs.A_MODE] & stat.S_IXOTH:
151 perms[9] = 'x'
152 if file[fs.A_MODE] & stat.S_ISVTX:
153 perms[9] = 'T'
154 if file[fs.A_MODE] & stat.S_IXOTH and file[fs.A_MODE] & stat.S_ISVTX:
155 perms[9] = 't'
156
157 linktarget = ''
158
159 if file[fs.A_TYPE] == fs.T_DIR:
160 perms[0] = 'd'
161 elif file[fs.A_TYPE] == fs.T_LINK:
162 perms[0] = 'l'
163 linktarget = ' -> %s' % (file[fs.A_TARGET],)
164
165 perms = ''.join(perms)
166 ctime = time.localtime(file[fs.A_CTIME])
167
168 line = '%s 1 %s %s %s %s %s%s' % \
169 (perms,
170 self.uid2name(file[fs.A_UID]),
171 self.gid2name(file[fs.A_GID]),
172 str(file[fs.A_SIZE]).rjust(len(str(largest))),
173 time.strftime('%Y-%m-%d %H:%M', ctime),
174 file[fs.A_NAME],
175 linktarget)
176
177 self.write('{0}\n'.format(line))
178
179
180 commands['/bin/ls'] = command_ls
181 commands['ls'] = command_ls
182 commands['/bin/dir'] = command_ls
183 commands['dir'] = command_ls
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cowrie/commands/ls.py b/src/cowrie/commands/ls.py
--- a/src/cowrie/commands/ls.py
+++ b/src/cowrie/commands/ls.py
@@ -112,9 +112,17 @@
def do_ls_l(self, path):
files = self.get_dir_files(path)
- largest = 0
+ filesize_str_extent = 0
if len(files):
- largest = max([x[fs.A_SIZE] for x in files])
+ filesize_str_extent = max([len(str(x[fs.A_SIZE])) for x in files])
+
+ user_name_str_extent = 0
+ if len(files):
+ user_name_str_extent = max([len(self.uid2name(x[fs.A_UID])) for x in files])
+
+ group_name_str_extent = 0
+ if len(files):
+ group_name_str_extent = max([len(self.gid2name(x[fs.A_GID])) for x in files])
for file in files:
if file[fs.A_NAME].startswith('.') and not self.showHidden:
@@ -167,9 +175,9 @@
line = '%s 1 %s %s %s %s %s%s' % \
(perms,
- self.uid2name(file[fs.A_UID]),
- self.gid2name(file[fs.A_GID]),
- str(file[fs.A_SIZE]).rjust(len(str(largest))),
+ self.uid2name(file[fs.A_UID]).ljust(user_name_str_extent),
+ self.gid2name(file[fs.A_GID]).ljust(group_name_str_extent),
+ str(file[fs.A_SIZE]).rjust(filesize_str_extent),
time.strftime('%Y-%m-%d %H:%M', ctime),
file[fs.A_NAME],
linktarget)
| {"golden_diff": "diff --git a/src/cowrie/commands/ls.py b/src/cowrie/commands/ls.py\n--- a/src/cowrie/commands/ls.py\n+++ b/src/cowrie/commands/ls.py\n@@ -112,9 +112,17 @@\n def do_ls_l(self, path):\n files = self.get_dir_files(path)\n \n- largest = 0\n+ filesize_str_extent = 0\n if len(files):\n- largest = max([x[fs.A_SIZE] for x in files])\n+ filesize_str_extent = max([len(str(x[fs.A_SIZE])) for x in files])\n+\n+ user_name_str_extent = 0\n+ if len(files):\n+ user_name_str_extent = max([len(self.uid2name(x[fs.A_UID])) for x in files])\n+\n+ group_name_str_extent = 0\n+ if len(files):\n+ group_name_str_extent = max([len(self.gid2name(x[fs.A_GID])) for x in files])\n \n for file in files:\n if file[fs.A_NAME].startswith('.') and not self.showHidden:\n@@ -167,9 +175,9 @@\n \n line = '%s 1 %s %s %s %s %s%s' % \\\n (perms,\n- self.uid2name(file[fs.A_UID]),\n- self.gid2name(file[fs.A_GID]),\n- str(file[fs.A_SIZE]).rjust(len(str(largest))),\n+ self.uid2name(file[fs.A_UID]).ljust(user_name_str_extent),\n+ self.gid2name(file[fs.A_GID]).ljust(group_name_str_extent),\n+ str(file[fs.A_SIZE]).rjust(filesize_str_extent),\n time.strftime('%Y-%m-%d %H:%M', ctime),\n file[fs.A_NAME],\n linktarget)\n", "issue": "`ls -l` user/group names need justification\n`ls -l` does not properly pad the user/group names\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behaviour:\r\n1. ssh into a cowrie instance\r\n2. `ls -l` on a directory with more than one user/group\r\n3. the user names and group names don't line up between files\r\n\r\n**Expected behaviour**\r\nNice justified columns of user/group names\r\n\n", "before_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport os.path\nimport stat\nimport time\n\nimport cowrie.shell.fs as fs\nfrom cowrie.shell.command import HoneyPotCommand\nfrom cowrie.shell.pwd import Group, Passwd\n\ncommands = {}\n\n\nclass command_ls(HoneyPotCommand):\n\n def uid2name(self, uid):\n try:\n return Passwd().getpwuid(uid)[\"pw_name\"]\n except Exception:\n return str(uid)\n\n def gid2name(self, gid):\n try:\n return Group().getgrgid(gid)[\"gr_name\"]\n except Exception:\n return str(gid)\n\n def call(self):\n path = self.protocol.cwd\n paths = []\n self.showHidden = False\n self.showDirectories = False\n func = self.do_ls_normal\n\n # Parse options or display no files\n try:\n opts, args = getopt.gnu_getopt(self.args, '1@ABCFGHLOPRSTUWabcdefghiklmnopqrstuvwx',\n ['help', 'version', 'param'])\n except getopt.GetoptError as err:\n self.write(\"ls: {}\\n\".format(err))\n self.write(\"Try 'ls --help' for more information.\\n\")\n return\n\n for x, a in opts:\n if x in ('-l'):\n func = self.do_ls_l\n if x in ('-a'):\n self.showHidden = True\n if x in ('-d'):\n self.showDirectories = True\n\n for arg in args:\n paths.append(self.protocol.fs.resolve_path(arg, self.protocol.cwd))\n\n if not paths:\n func(path)\n else:\n for path in paths:\n func(path)\n\n def get_dir_files(self, path):\n try:\n if self.protocol.fs.isdir(path) and not self.showDirectories:\n files = self.protocol.fs.get_path(path)[:]\n if self.showHidden:\n dot = self.protocol.fs.getfile(path)[:]\n dot[fs.A_NAME] = '.'\n files.append(dot)\n dotdot = self.protocol.fs.getfile(os.path.split(path)[0])[:]\n if not dotdot:\n dotdot = self.protocol.fs.getfile(path)[:]\n dotdot[fs.A_NAME] = '..'\n files.append(dotdot)\n else:\n files = [x for x in files if not 
x[fs.A_NAME].startswith('.')]\n files.sort()\n else:\n files = (self.protocol.fs.getfile(path)[:],)\n except Exception:\n self.write(\n 'ls: cannot access %s: No such file or directory\\n' % (path,))\n return\n return files\n\n def do_ls_normal(self, path):\n files = self.get_dir_files(path)\n\n line = [x[fs.A_NAME] for x in files]\n if not line:\n return\n count = 0\n maxlen = max([len(x) for x in line])\n\n try:\n wincols = self.protocol.user.windowSize[1]\n except AttributeError:\n wincols = 80\n\n perline = int(wincols / (maxlen + 1))\n for f in line:\n if count == perline:\n count = 0\n self.write('\\n')\n self.write(f.ljust(maxlen + 1))\n count += 1\n self.write('\\n')\n\n def do_ls_l(self, path):\n files = self.get_dir_files(path)\n\n largest = 0\n if len(files):\n largest = max([x[fs.A_SIZE] for x in files])\n\n for file in files:\n if file[fs.A_NAME].startswith('.') and not self.showHidden:\n continue\n\n perms = ['-'] * 10\n if file[fs.A_MODE] & stat.S_IRUSR:\n perms[1] = 'r'\n if file[fs.A_MODE] & stat.S_IWUSR:\n perms[2] = 'w'\n if file[fs.A_MODE] & stat.S_IXUSR:\n perms[3] = 'x'\n if file[fs.A_MODE] & stat.S_ISUID:\n perms[3] = 'S'\n if file[fs.A_MODE] & stat.S_IXUSR and file[fs.A_MODE] & stat.S_ISUID:\n perms[3] = 's'\n\n if file[fs.A_MODE] & stat.S_IRGRP:\n perms[4] = 'r'\n if file[fs.A_MODE] & stat.S_IWGRP:\n perms[5] = 'w'\n if file[fs.A_MODE] & stat.S_IXGRP:\n perms[6] = 'x'\n if file[fs.A_MODE] & stat.S_ISGID:\n perms[6] = 'S'\n if file[fs.A_MODE] & stat.S_IXGRP and file[fs.A_MODE] & stat.S_ISGID:\n perms[6] = 's'\n\n if file[fs.A_MODE] & stat.S_IROTH:\n perms[7] = 'r'\n if file[fs.A_MODE] & stat.S_IWOTH:\n perms[8] = 'w'\n if file[fs.A_MODE] & stat.S_IXOTH:\n perms[9] = 'x'\n if file[fs.A_MODE] & stat.S_ISVTX:\n perms[9] = 'T'\n if file[fs.A_MODE] & stat.S_IXOTH and file[fs.A_MODE] & stat.S_ISVTX:\n perms[9] = 't'\n\n linktarget = ''\n\n if file[fs.A_TYPE] == fs.T_DIR:\n perms[0] = 'd'\n elif file[fs.A_TYPE] == fs.T_LINK:\n perms[0] = 'l'\n linktarget = ' -> %s' % (file[fs.A_TARGET],)\n\n perms = ''.join(perms)\n ctime = time.localtime(file[fs.A_CTIME])\n\n line = '%s 1 %s %s %s %s %s%s' % \\\n (perms,\n self.uid2name(file[fs.A_UID]),\n self.gid2name(file[fs.A_GID]),\n str(file[fs.A_SIZE]).rjust(len(str(largest))),\n time.strftime('%Y-%m-%d %H:%M', ctime),\n file[fs.A_NAME],\n linktarget)\n\n self.write('{0}\\n'.format(line))\n\n\ncommands['/bin/ls'] = command_ls\ncommands['ls'] = command_ls\ncommands['/bin/dir'] = command_ls\ncommands['dir'] = command_ls\n", "path": "src/cowrie/commands/ls.py"}], "after_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport os.path\nimport stat\nimport time\n\nimport cowrie.shell.fs as fs\nfrom cowrie.shell.command import HoneyPotCommand\nfrom cowrie.shell.pwd import Group, Passwd\n\ncommands = {}\n\n\nclass command_ls(HoneyPotCommand):\n\n def uid2name(self, uid):\n try:\n return Passwd().getpwuid(uid)[\"pw_name\"]\n except Exception:\n return str(uid)\n\n def gid2name(self, gid):\n try:\n return Group().getgrgid(gid)[\"gr_name\"]\n except Exception:\n return str(gid)\n\n def call(self):\n path = self.protocol.cwd\n paths = []\n self.showHidden = False\n self.showDirectories = False\n func = self.do_ls_normal\n\n # Parse options or display no files\n try:\n opts, args = getopt.gnu_getopt(self.args, '1@ABCFGHLOPRSTUWabcdefghiklmnopqrstuvwx',\n ['help', 'version', 'param'])\n except 
getopt.GetoptError as err:\n self.write(\"ls: {}\\n\".format(err))\n self.write(\"Try 'ls --help' for more information.\\n\")\n return\n\n for x, a in opts:\n if x in ('-l'):\n func = self.do_ls_l\n if x in ('-a'):\n self.showHidden = True\n if x in ('-d'):\n self.showDirectories = True\n\n for arg in args:\n paths.append(self.protocol.fs.resolve_path(arg, self.protocol.cwd))\n\n if not paths:\n func(path)\n else:\n for path in paths:\n func(path)\n\n def get_dir_files(self, path):\n try:\n if self.protocol.fs.isdir(path) and not self.showDirectories:\n files = self.protocol.fs.get_path(path)[:]\n if self.showHidden:\n dot = self.protocol.fs.getfile(path)[:]\n dot[fs.A_NAME] = '.'\n files.append(dot)\n dotdot = self.protocol.fs.getfile(os.path.split(path)[0])[:]\n if not dotdot:\n dotdot = self.protocol.fs.getfile(path)[:]\n dotdot[fs.A_NAME] = '..'\n files.append(dotdot)\n else:\n files = [x for x in files if not x[fs.A_NAME].startswith('.')]\n files.sort()\n else:\n files = (self.protocol.fs.getfile(path)[:],)\n except Exception:\n self.write(\n 'ls: cannot access %s: No such file or directory\\n' % (path,))\n return\n return files\n\n def do_ls_normal(self, path):\n files = self.get_dir_files(path)\n\n line = [x[fs.A_NAME] for x in files]\n if not line:\n return\n count = 0\n maxlen = max([len(x) for x in line])\n\n try:\n wincols = self.protocol.user.windowSize[1]\n except AttributeError:\n wincols = 80\n\n perline = int(wincols / (maxlen + 1))\n for f in line:\n if count == perline:\n count = 0\n self.write('\\n')\n self.write(f.ljust(maxlen + 1))\n count += 1\n self.write('\\n')\n\n def do_ls_l(self, path):\n files = self.get_dir_files(path)\n\n filesize_str_extent = 0\n if len(files):\n filesize_str_extent = max([len(str(x[fs.A_SIZE])) for x in files])\n\n user_name_str_extent = 0\n if len(files):\n user_name_str_extent = max([len(self.uid2name(x[fs.A_UID])) for x in files])\n\n group_name_str_extent = 0\n if len(files):\n group_name_str_extent = max([len(self.gid2name(x[fs.A_GID])) for x in files])\n\n for file in files:\n if file[fs.A_NAME].startswith('.') and not self.showHidden:\n continue\n\n perms = ['-'] * 10\n if file[fs.A_MODE] & stat.S_IRUSR:\n perms[1] = 'r'\n if file[fs.A_MODE] & stat.S_IWUSR:\n perms[2] = 'w'\n if file[fs.A_MODE] & stat.S_IXUSR:\n perms[3] = 'x'\n if file[fs.A_MODE] & stat.S_ISUID:\n perms[3] = 'S'\n if file[fs.A_MODE] & stat.S_IXUSR and file[fs.A_MODE] & stat.S_ISUID:\n perms[3] = 's'\n\n if file[fs.A_MODE] & stat.S_IRGRP:\n perms[4] = 'r'\n if file[fs.A_MODE] & stat.S_IWGRP:\n perms[5] = 'w'\n if file[fs.A_MODE] & stat.S_IXGRP:\n perms[6] = 'x'\n if file[fs.A_MODE] & stat.S_ISGID:\n perms[6] = 'S'\n if file[fs.A_MODE] & stat.S_IXGRP and file[fs.A_MODE] & stat.S_ISGID:\n perms[6] = 's'\n\n if file[fs.A_MODE] & stat.S_IROTH:\n perms[7] = 'r'\n if file[fs.A_MODE] & stat.S_IWOTH:\n perms[8] = 'w'\n if file[fs.A_MODE] & stat.S_IXOTH:\n perms[9] = 'x'\n if file[fs.A_MODE] & stat.S_ISVTX:\n perms[9] = 'T'\n if file[fs.A_MODE] & stat.S_IXOTH and file[fs.A_MODE] & stat.S_ISVTX:\n perms[9] = 't'\n\n linktarget = ''\n\n if file[fs.A_TYPE] == fs.T_DIR:\n perms[0] = 'd'\n elif file[fs.A_TYPE] == fs.T_LINK:\n perms[0] = 'l'\n linktarget = ' -> %s' % (file[fs.A_TARGET],)\n\n perms = ''.join(perms)\n ctime = time.localtime(file[fs.A_CTIME])\n\n line = '%s 1 %s %s %s %s %s%s' % \\\n (perms,\n self.uid2name(file[fs.A_UID]).ljust(user_name_str_extent),\n self.gid2name(file[fs.A_GID]).ljust(group_name_str_extent),\n str(file[fs.A_SIZE]).rjust(filesize_str_extent),\n 
time.strftime('%Y-%m-%d %H:%M', ctime),\n file[fs.A_NAME],\n linktarget)\n\n self.write('{0}\\n'.format(line))\n\n\ncommands['/bin/ls'] = command_ls\ncommands['ls'] = command_ls\ncommands['/bin/dir'] = command_ls\ncommands['dir'] = command_ls\n", "path": "src/cowrie/commands/ls.py"}]} | 2,268 | 416 |
gh_patches_debug_9897 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4931 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[1.1.0-rc4] "Unable to create virtualenv. Check network settings and try again"
(Tested on a Tails 3.16 Admin Workstation by checking out 1.1.0-rc4 tag, without updating my servers.)
As expected, running `securedrop-admin` commands triggered the "run setup" step. However, the `securedrop-admin setup` step itself did not complete successfully; it went pretty far along but finally failed with this error:
"Unable to create virtualenv. Check network settings and try again"
Tor seems to be working fine. This may be an intermittent issue, but it would be good to warn users about it and to provide mitigation instructions if it is likely to arise during updates.
--- END ISSUE ---
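For context, the patch recorded for this entry (the golden diff further below) adds `.decode('utf-8')` before the string comparison in `is_tails()`. The underlying pitfall is that `subprocess.check_output` returns bytes on Python 3, so comparing its output to a str literal is always False; Tails detection then silently fails, the later `virtualenv`/`pip` calls run without `torify`, and on Tails that surfaces as the reported network error. A minimal illustration of the pitfall follows; the function names are illustrative and it assumes `lsb_release` is available.

```python
import subprocess

def is_tails_buggy():
    distro = subprocess.check_output("lsb_release --id --short", shell=True).strip()
    return distro == "Tails"   # bytes == str is always False on Python 3

def is_tails_fixed():
    distro = subprocess.check_output(
        "lsb_release --id --short", shell=True).decode("utf-8").strip()
    return distro == "Tails"   # str == str compares as intended

# On a Tails system the buggy version returns False, so callers skip the
# `torify` wrapper and direct network access fails behind Tor.
```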
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `admin/bootstrap.py`
Content:
```
1 # -*- mode: python; coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2018 Freedom of the Press Foundation & al
4 # Copyright (C) 2018 Loic Dachary <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19
20 import argparse
21 import logging
22 import os
23 import shutil
24 import subprocess
25 import sys
26
27 sdlog = logging.getLogger(__name__)
28
29 DIR = os.path.dirname(os.path.realpath(__file__))
30 VENV_DIR = os.path.join(DIR, ".venv3")
31
32
33 def setup_logger(verbose=False):
34 """ Configure logging handler """
35 # Set default level on parent
36 sdlog.setLevel(logging.DEBUG)
37 level = logging.DEBUG if verbose else logging.INFO
38
39 stdout = logging.StreamHandler(sys.stdout)
40 stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
41 stdout.setLevel(level)
42 sdlog.addHandler(stdout)
43
44
45 def run_command(command):
46 """
47 Wrapper function to display stdout for running command,
48 similar to how shelling out in a Bash script displays rolling output.
49
50 Yields a list of the stdout from the `command`, and raises a
51 CalledProcessError if `command` returns non-zero.
52 """
53 popen = subprocess.Popen(command,
54 stdout=subprocess.PIPE,
55 stderr=subprocess.STDOUT)
56 for stdout_line in iter(popen.stdout.readline, b""):
57 yield stdout_line
58 popen.stdout.close()
59 return_code = popen.wait()
60 if return_code:
61 raise subprocess.CalledProcessError(return_code, command)
62
63
64 def is_tails():
65 try:
66 id = subprocess.check_output('lsb_release --id --short',
67 shell=True).strip()
68 except subprocess.CalledProcessError:
69 id = None
70
71 # dirty hack to unreliably detect Tails 4.0~beta2
72 if id == b'Debian':
73 if os.uname()[1] == 'amnesia':
74 id = 'Tails'
75
76 return id == 'Tails'
77
78
79 def clean_up_tails3_venv(virtualenv_dir=VENV_DIR):
80 """
81 Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is
82 based on Debian Buster and uses libpython3.7. This means that the Tails 3.x
83 virtualenv will not work under Tails 4.x, and will need to be destroyed and
84 rebuilt. We can detect if the version of libpython is 3.5 in the
85 admin/.venv3/ folder, and delete it if that's the case. This will ensure a
86 smooth upgrade from Tails 3.x to Tails 4.x.
87 """
88 if is_tails():
89 try:
90 dist = subprocess.check_output('lsb_release --codename --short',
91 shell=True).strip()
92 except subprocess.CalledProcessError:
93 dist = None
94
95 # tails4 is based on buster
96 if dist == b'buster':
97 python_lib_path = os.path.join(virtualenv_dir, "lib/python3.5")
98 if os.path.exists(os.path.join(python_lib_path)):
99 sdlog.info(
100 "Tails 3 Python 3 virtualenv detected. "
101 "Removing it."
102 )
103 shutil.rmtree(virtualenv_dir)
104 sdlog.info("Tails 3 Python 3 virtualenv deleted.")
105
106
107 def checkenv(args):
108 clean_up_tails3_venv(VENV_DIR)
109 if not os.path.exists(os.path.join(VENV_DIR, "bin/activate")):
110 sdlog.error('Please run "securedrop-admin setup".')
111 sys.exit(1)
112
113
114 def maybe_torify():
115 if is_tails():
116 return ['torify']
117 else:
118 return []
119
120
121 def install_apt_dependencies(args):
122 """
123 Install apt dependencies in Tails. In order to install Ansible in
124 a virtualenv, first there are a number of Python prerequisites.
125 """
126 sdlog.info("Installing SecureDrop Admin dependencies")
127 sdlog.info(("You'll be prompted for the temporary Tails admin password,"
128 " which was set on Tails login screen"))
129
130 apt_command = ['sudo', 'su', '-c',
131 "apt-get update && \
132 apt-get -q -o=Dpkg::Use-Pty=0 install -y \
133 python3-virtualenv \
134 python3-yaml \
135 python3-pip \
136 ccontrol \
137 virtualenv \
138 libffi-dev \
139 libssl-dev \
140 libpython3-dev",
141 ]
142
143 try:
144 # Print command results in real-time, to keep Admin apprised
145 # of progress during long-running command.
146 for output_line in run_command(apt_command):
147 print(output_line.decode('utf-8').rstrip())
148 except subprocess.CalledProcessError:
149 # Tails supports apt persistence, which was used by SecureDrop
150 # under Tails 2.x. If updates are being applied, don't try to pile
151 # on with more apt requests.
152 sdlog.error(("Failed to install apt dependencies. Check network"
153 " connection and try again."))
154 raise
155
156
157 def envsetup(args):
158 """Installs Admin tooling required for managing SecureDrop. Specifically:
159
160 * updates apt-cache
161 * installs apt packages for Python virtualenv
162 * creates virtualenv
163 * installs pip packages inside virtualenv
164
165 The virtualenv is created within the Persistence volume in Tails, so that
166 Ansible is available to the Admin on subsequent boots without requiring
167 installation of packages again.
168 """
169 # clean up tails 3.x venv when migrating to tails 4.x
170 clean_up_tails3_venv(VENV_DIR)
171
172 # virtualenv doesnt exist? Install dependencies and create
173 if not os.path.exists(VENV_DIR):
174
175 install_apt_dependencies(args)
176
177 # Technically you can create a virtualenv from within python
178 # but pip can only be run over tor on tails, and debugging that
179 # along with instaling a third-party dependency is not worth
180 # the effort here.
181 sdlog.info("Setting up virtualenv")
182 try:
183 sdlog.debug(subprocess.check_output(
184 maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],
185 stderr=subprocess.STDOUT))
186 except subprocess.CalledProcessError as e:
187 sdlog.debug(e.output)
188 sdlog.error(("Unable to create virtualenv. Check network settings"
189 " and try again."))
190 raise
191 else:
192 sdlog.info("Virtualenv already exists, not creating")
193
194 install_pip_dependencies(args)
195 if os.path.exists(os.path.join(DIR, 'setup.py')):
196 install_pip_self(args)
197
198 sdlog.info("Finished installing SecureDrop dependencies")
199
200
201 def install_pip_self(args):
202 pip_install_cmd = [
203 os.path.join(VENV_DIR, 'bin', 'pip3'),
204 'install', '-e', DIR
205 ]
206 try:
207 subprocess.check_output(maybe_torify() + pip_install_cmd,
208 stderr=subprocess.STDOUT)
209 except subprocess.CalledProcessError as e:
210 sdlog.debug(e.output)
211 sdlog.error("Unable to install self, run with -v for more information")
212 raise
213
214
215 def install_pip_dependencies(args, pip_install_cmd=[
216 os.path.join(VENV_DIR, 'bin', 'pip3'),
217 'install',
218 # Specify requirements file.
219 '-r', os.path.join(DIR, 'requirements.txt'),
220 '--require-hashes',
221 # Make sure to upgrade packages only if necessary.
222 '-U', '--upgrade-strategy', 'only-if-needed',
223 ]):
224 """
225 Install Python dependencies via pip into virtualenv.
226 """
227
228 sdlog.info("Checking Python dependencies for securedrop-admin")
229 try:
230 pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,
231 stderr=subprocess.STDOUT)
232 except subprocess.CalledProcessError as e:
233 sdlog.debug(e.output)
234 sdlog.error(("Failed to install pip dependencies. Check network"
235 " connection and try again."))
236 raise
237
238 sdlog.debug(pip_output)
239 if "Successfully installed" in str(pip_output):
240 sdlog.info("Python dependencies for securedrop-admin upgraded")
241 else:
242 sdlog.info("Python dependencies for securedrop-admin are up-to-date")
243
244
245 def parse_argv(argv):
246 parser = argparse.ArgumentParser()
247 parser.add_argument('-v', action='store_true', default=False,
248 help="Increase verbosity on output")
249 parser.set_defaults(func=envsetup)
250
251 subparsers = parser.add_subparsers()
252
253 envsetup_parser = subparsers.add_parser(
254 'envsetup',
255 help='Set up the admin virtualenv.'
256 )
257 envsetup_parser.set_defaults(func=envsetup)
258
259 checkenv_parser = subparsers.add_parser(
260 'checkenv',
261 help='Check that the admin virtualenv is properly set up.'
262 )
263 checkenv_parser.set_defaults(func=checkenv)
264
265 return parser.parse_args(argv)
266
267
268 if __name__ == "__main__":
269 args = parse_argv(sys.argv[1:])
270 setup_logger(args.v)
271
272 try:
273 args.func(args)
274 except Exception:
275 sys.exit(1)
276 else:
277 sys.exit(0)
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/admin/bootstrap.py b/admin/bootstrap.py
--- a/admin/bootstrap.py
+++ b/admin/bootstrap.py
@@ -64,12 +64,12 @@
def is_tails():
try:
id = subprocess.check_output('lsb_release --id --short',
- shell=True).strip()
+ shell=True).decode('utf-8').strip()
except subprocess.CalledProcessError:
id = None
# dirty hack to unreliably detect Tails 4.0~beta2
- if id == b'Debian':
+ if id == 'Debian':
if os.uname()[1] == 'amnesia':
id = 'Tails'
| {"golden_diff": "diff --git a/admin/bootstrap.py b/admin/bootstrap.py\n--- a/admin/bootstrap.py\n+++ b/admin/bootstrap.py\n@@ -64,12 +64,12 @@\n def is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n- shell=True).strip()\n+ shell=True).decode('utf-8').strip()\n except subprocess.CalledProcessError:\n id = None\n \n # dirty hack to unreliably detect Tails 4.0~beta2\n- if id == b'Debian':\n+ if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n", "issue": "[1.1.0-rc4] \"Unable to create virtualenv. Check network settings and try again\"\n(Tested on a Tails 3.16 Admin Workstation by checking out 1.1.0-rc4 tag, without updating my servers.)\r\n\r\nAs expected, running `securedrop-admin` commands triggered the \"run setup\" step. However, the `securedrop-admin setup` step itself did not complete successfully; it went pretty far along but finally failed with this error:\r\n\r\n\"Unable to create virtualenv. Check network settings and try again\"\r\n\r\nTor seems to be working fine. Possibly intermittent issues but good to warn users about and have mitigation instructions if it is likely to arise during updates.\n", "before_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == b'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(VENV_DIR)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}], "after_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).decode('utf-8').strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(VENV_DIR)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}]} | 3,284 | 152 |
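
The bootstrap patch in the record above works because `subprocess.check_output` returns `bytes` on Python 3, so without the added `decode('utf-8')` the result can never equal the `str` literal `'Tails'` checked at the end of `is_tails()`. A minimal sketch of the behaviour the fix relies on, assuming a Linux host where `lsb_release` is installed:

```python
import subprocess

# Without decoding, check_output gives bytes, so str comparisons always fail.
raw = subprocess.check_output('lsb_release --id --short', shell=True)
print(type(raw))                # <class 'bytes'>
print(raw.strip() == 'Debian')  # False, even on a Debian-derived system

# Decoding first restores the comparison that is_tails() intends to make.
decoded = raw.decode('utf-8').strip()
print(decoded == 'Debian')      # True on Debian/Tails
```
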
gh_patches_debug_2042 | rasdani/github-patches | git_diff | aws__aws-cli-357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install awscli fails
I tried `pip install awscli` from https://github.com/aws/aws-cli/blob/develop/README.rst and failed:
http://sprunge.us/NfbW
/home/hendry/.pip/pip.log = http://ix.io/7SC
Hilarious how bad Python packaging is. I'm running Archlinux with Python 3.3.2.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import sys
3
4 from setuptools import setup, find_packages
5
6 import awscli
7
8
9 requires = ['botocore>=0.16.0,<0.17.0',
10 'bcdoc>=0.9.0,<0.10.0',
11 'six>=1.1.0',
12 'colorama==0.2.5',
13 'docutils>=0.10',
14 'rsa==3.1.1']
15
16 if sys.version_info[:2] == (2, 6):
17 # For python2.6 we have to require argparse since it
18 # was not in stdlib until 2.7.
19 requires.append('argparse>=1.1')
20
21
22 setup_options = dict(
23 name='awscli',
24 version=awscli.__version__,
25 description='Universal Command Line Environment for AWS.',
26 long_description=open('README.rst').read(),
27 author='Mitch Garnaat',
28 author_email='[email protected]',
29 url='http://aws.amazon.com/cli/',
30 scripts=['bin/aws', 'bin/aws.cmd',
31 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],
32 packages=find_packages('.', exclude=['tests*']),
33 package_dir={'awscli': 'awscli'},
34 package_data={'awscli': ['data/*.json', 'examples/*/*']},
35 install_requires=requires,
36 license="Apache License 2.0",
37 classifiers=(
38 'Development Status :: 5 - Production/Stable',
39 'Intended Audience :: Developers',
40 'Intended Audience :: System Administrators',
41 'Natural Language :: English',
42 'License :: OSI Approved :: Apache Software License',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2.6',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.3',
48 ),
49 )
50
51 if 'py2exe' in sys.argv:
52 # This will actually give us a py2exe command.
53 import py2exe
54 # And we have some py2exe specific options.
55 setup_options['options'] = {
56 'py2exe': {
57 'optimize': 0,
58 'skip_archive': True,
59 'includes': ['ConfigParser', 'urllib', 'httplib',
60 'docutils.readers.standalone',
61 'docutils.parsers.rst',
62 'docutils.languages.en',
63 'xml.etree.ElementTree', 'HTMLParser',
64 'awscli.handlers'],
65 }
66 }
67 setup_options['console'] = ['bin/aws']
68
69
70 setup(**setup_options)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
'six>=1.1.0',
'colorama==0.2.5',
'docutils>=0.10',
- 'rsa==3.1.1']
+ 'rsa==3.1.2']
if sys.version_info[:2] == (2, 6):
# For python2.6 we have to require argparse since it
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n- 'rsa==3.1.1']\n+ 'rsa==3.1.2']\n \n if sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n", "issue": "pip install awscli fails\nI tried `pip install awscli` from https://github.com/aws/aws-cli/blob/develop/README.rst and failed:\n\nhttp://sprunge.us/NfbW\n/home/hendry/.pip/pip.log = http://ix.io/7SC\n\nHilarious how bad Python packaging is. I'm running Archlinux with Python 3.3.2.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'docutils>=0.10',\n 'rsa==3.1.2']\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n 
install_requires=requires,\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,043 | 115 |
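
The patch in the record above only moves the `rsa` pin from 3.1.1 to 3.1.2, attributing the reported Python 3.3 install failure to the older pinned release. A rough way to isolate which pinned requirement breaks an install, assuming a throwaway virtualenv on the affected interpreter (the loop and the pin list are illustrative, not part of the project):

```python
import subprocess
import sys

# Pins copied from setup.py; try each one on its own to find the culprit.
pins = ['botocore>=0.16.0,<0.17.0', 'bcdoc>=0.9.0,<0.10.0', 'six>=1.1.0',
        'colorama==0.2.5', 'docutils>=0.10', 'rsa==3.1.1']

for pin in pins:
    result = subprocess.call([sys.executable, '-m', 'pip', 'install', pin])
    print('%-28s %s' % (pin, 'ok' if result == 0 else 'FAILED'))
```
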
gh_patches_debug_7073 | rasdani/github-patches | git_diff | pre-commit__pre-commit-287 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make pre-commit consider a hook as "failed" if it modifies files and still (incorrectly?) exits 0
This would allow us to ditch autopep8-wrapper and support a bunch of hooks which refused to be scriptable (yapf, etc.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import logging
5 import os
6 import sys
7
8 from pre_commit import color
9 from pre_commit import git
10 from pre_commit.logging_handler import LoggingHandler
11 from pre_commit.output import get_hook_message
12 from pre_commit.output import sys_stdout_write_wrapper
13 from pre_commit.staged_files_only import staged_files_only
14 from pre_commit.util import cmd_output
15 from pre_commit.util import noop_context
16
17
18 logger = logging.getLogger('pre_commit')
19
20
21 def _get_skips(environ):
22 skips = environ.get('SKIP', '')
23 return set(skip.strip() for skip in skips.split(',') if skip.strip())
24
25
26 def _hook_msg_start(hook, verbose):
27 return '{0}{1}'.format(
28 '[{0}] '.format(hook['id']) if verbose else '',
29 hook['name'],
30 )
31
32
33 def _print_no_files_skipped(hook, write, args):
34 write(get_hook_message(
35 _hook_msg_start(hook, args.verbose),
36 postfix='(no files to check) ',
37 end_msg='Skipped',
38 end_color=color.TURQUOISE,
39 use_color=args.color,
40 ))
41
42
43 def _print_user_skipped(hook, write, args):
44 write(get_hook_message(
45 _hook_msg_start(hook, args.verbose),
46 end_msg='Skipped',
47 end_color=color.YELLOW,
48 use_color=args.color,
49 ))
50
51
52 def get_changed_files(new, old):
53 return cmd_output(
54 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),
55 )[1].splitlines()
56
57
58 def get_filenames(args, include_expr, exclude_expr):
59 if args.origin and args.source:
60 getter = git.get_files_matching(
61 lambda: get_changed_files(args.origin, args.source),
62 )
63 elif args.files:
64 getter = git.get_files_matching(lambda: args.files)
65 elif args.all_files:
66 getter = git.get_all_files_matching
67 elif git.is_in_merge_conflict():
68 getter = git.get_conflicted_files_matching
69 else:
70 getter = git.get_staged_files_matching
71 return getter(include_expr, exclude_expr)
72
73
74 def _run_single_hook(hook, repo, args, write, skips=frozenset()):
75 filenames = get_filenames(args, hook['files'], hook['exclude'])
76 if hook['id'] in skips:
77 _print_user_skipped(hook, write, args)
78 return 0
79 elif not filenames:
80 _print_no_files_skipped(hook, write, args)
81 return 0
82
83 # Print the hook and the dots first in case the hook takes hella long to
84 # run.
85 write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
86 sys.stdout.flush()
87
88 retcode, stdout, stderr = repo.run_hook(hook, filenames)
89
90 if retcode:
91 retcode = 1
92 print_color = color.RED
93 pass_fail = 'Failed'
94 else:
95 retcode = 0
96 print_color = color.GREEN
97 pass_fail = 'Passed'
98
99 write(color.format_color(pass_fail, print_color, args.color) + '\n')
100
101 if (stdout or stderr) and (retcode or args.verbose):
102 write('hookid: {0}\n'.format(hook['id']))
103 write('\n')
104 for output in (stdout, stderr):
105 assert type(output) is bytes, type(output)
106 if output.strip():
107 write(output.strip() + b'\n')
108 write('\n')
109
110 return retcode
111
112
113 def _run_hooks(repo_hooks, args, write, environ):
114 """Actually run the hooks."""
115 skips = _get_skips(environ)
116 retval = 0
117 for repo, hook in repo_hooks:
118 retval |= _run_single_hook(hook, repo, args, write, skips)
119 return retval
120
121
122 def get_repo_hooks(runner):
123 for repo in runner.repositories:
124 for _, hook in repo.hooks:
125 yield (repo, hook)
126
127
128 def _has_unmerged_paths(runner):
129 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
130 return bool(stdout.strip())
131
132
133 def _has_unstaged_config(runner):
134 retcode, _, _ = runner.cmd_runner.run(
135 ('git', 'diff', '--exit-code', runner.config_file_path),
136 retcode=None,
137 )
138 # be explicit, other git errors don't mean it has an unstaged config.
139 return retcode == 1
140
141
142 def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
143 no_stash = args.no_stash or args.all_files or bool(args.files)
144 # Set up our logging handler
145 logger.addHandler(LoggingHandler(args.color, write=write))
146 logger.setLevel(logging.INFO)
147
148 # Check if we have unresolved merge conflict files and fail fast.
149 if _has_unmerged_paths(runner):
150 logger.error('Unmerged files. Resolve before committing.')
151 return 1
152 if bool(args.source) != bool(args.origin):
153 logger.error('Specify both --origin and --source.')
154 return 1
155 if _has_unstaged_config(runner) and not no_stash:
156 if args.allow_unstaged_config:
157 logger.warn(
158 'You have an unstaged config file and have specified the '
159 '--allow-unstaged-config option.\n'
160 'Note that your config will be stashed before the config is '
161 'parsed unless --no-stash is specified.',
162 )
163 else:
164 logger.error(
165 'Your .pre-commit-config.yaml is unstaged.\n'
166 '`git add .pre-commit-config.yaml` to fix this.\n'
167 'Run pre-commit with --allow-unstaged-config to silence this.'
168 )
169 return 1
170
171 if no_stash:
172 ctx = noop_context()
173 else:
174 ctx = staged_files_only(runner.cmd_runner)
175
176 with ctx:
177 repo_hooks = list(get_repo_hooks(runner))
178
179 if args.hook:
180 repo_hooks = [
181 (repo, hook) for repo, hook in repo_hooks
182 if hook['id'] == args.hook
183 ]
184 if not repo_hooks:
185 write('No hook with id `{0}`\n'.format(args.hook))
186 return 1
187
188 # Filter hooks for stages
189 repo_hooks = [
190 (repo, hook) for repo, hook in repo_hooks
191 if not hook['stages'] or args.hook_stage in hook['stages']
192 ]
193
194 return _run_hooks(repo_hooks, args, write, environ)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -85,7 +85,13 @@
write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
sys.stdout.flush()
+ diff_before = cmd_output('git', 'diff', retcode=None)
retcode, stdout, stderr = repo.run_hook(hook, filenames)
+ diff_after = cmd_output('git', 'diff', retcode=None)
+
+ # If the hook makes changes, fail the commit
+ if diff_before != diff_after:
+ retcode = 1
if retcode:
retcode = 1
| {"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -85,7 +85,13 @@\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n \n+ diff_before = cmd_output('git', 'diff', retcode=None)\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n+ diff_after = cmd_output('git', 'diff', retcode=None)\n+\n+ # If the hook makes changes, fail the commit\n+ if diff_before != diff_after:\n+ retcode = 1\n \n if retcode:\n retcode = 1\n", "issue": "Make pre-commit consider a hook as \"failed\" if it modifies files and still (incorrectly?) exits 0\nThis would allow us to ditch autopep8-wrapper and support a bunch of hooks which refused to be scriptable (yapf, etc.)\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: 
{0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n assert type(output) is bytes, type(output)\n if output.strip():\n write(output.strip() + b'\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n 
write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n diff_before = cmd_output('git', 'diff', retcode=None)\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n diff_after = cmd_output('git', 'diff', retcode=None)\n\n # If the hook makes changes, fail the commit\n if diff_before != diff_after:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n assert type(output) is bytes, type(output)\n if output.strip():\n write(output.strip() + b'\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}]} | 2,230 | 168 |
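
The run.py patch in the record above implements the requested behaviour by snapshotting `git diff` before and after the hook and forcing a non-zero return code when the two snapshots differ. The same idea in a standalone form, using plain `subprocess` instead of pre-commit's `cmd_output` helper:

```python
import subprocess

def run_and_fail_on_changes(cmd):
    """Run cmd; report failure if it exits non-zero *or* rewrites tracked files."""
    diff_before = subprocess.check_output(['git', 'diff'])
    retcode = subprocess.call(cmd)
    diff_after = subprocess.check_output(['git', 'diff'])

    # A formatter that modifies files but still exits 0 is treated as failed.
    if diff_before != diff_after:
        retcode = 1
    return retcode

# Example (run inside a git checkout): treat an autoformatter as failed
# whenever it changes anything.
# print(run_and_fail_on_changes(['autopep8', '--in-place', 'example.py']))
```
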
gh_patches_debug_8933 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1945 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Public projects filter in REST API not working correctly
## Test plan
All projects (and other objects) should be displayed in the REST API.
## Issue description
As a quick fix, just display all projects (public and private) in the API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/viewsets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import filters
8 from rest_framework import viewsets
9 from rest_framework.authentication import SessionAuthentication
10 from rest_framework.permissions import DjangoObjectPermissions
11
12 from .models import TastyTokenAuthentication
13
14 from akvo.rsr.models import Project
15
16
17 class BaseRSRViewSet(viewsets.ModelViewSet):
18 """
19 Base class used for the view sets for RSR models. Provides unified auth and perms settings.
20 Only public projects will be shown by filtering the queryset.
21 """
22 authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )
23 permission_classes = (DjangoObjectPermissions, )
24 filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )
25 ordering_fields = '__all__'
26
27 def get_queryset(self):
28 """Filter out any private projects."""
29 for related_obj in self.queryset.model._meta.get_all_related_objects():
30 if related_obj.model == Project:
31 self.queryset = self.queryset.filter(project__is_public=True)
32 break
33 return super(BaseRSRViewSet, self).get_queryset()
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py
--- a/akvo/rest/viewsets.py
+++ b/akvo/rest/viewsets.py
@@ -23,11 +23,3 @@
permission_classes = (DjangoObjectPermissions, )
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )
ordering_fields = '__all__'
-
- def get_queryset(self):
- """Filter out any private projects."""
- for related_obj in self.queryset.model._meta.get_all_related_objects():
- if related_obj.model == Project:
- self.queryset = self.queryset.filter(project__is_public=True)
- break
- return super(BaseRSRViewSet, self).get_queryset()
| {"golden_diff": "diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py\n--- a/akvo/rest/viewsets.py\n+++ b/akvo/rest/viewsets.py\n@@ -23,11 +23,3 @@\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n-\n- def get_queryset(self):\n- \"\"\"Filter out any private projects.\"\"\"\n- for related_obj in self.queryset.model._meta.get_all_related_objects():\n- if related_obj.model == Project:\n- self.queryset = self.queryset.filter(project__is_public=True)\n- break\n- return super(BaseRSRViewSet, self).get_queryset()\n", "issue": "Public projects filter in REST API not working correctly\n## Test plan\n\nAll projects (and other objects) should be displayed in the REST API.\n## Issue description\n\nAs a quick fix, just display all projects (public and private) in the API.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import DjangoObjectPermissions\n\nfrom .models import TastyTokenAuthentication\n\nfrom akvo.rsr.models import Project\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. Provides unified auth and perms settings.\n Only public projects will be shown by filtering the queryset.\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n\n def get_queryset(self):\n \"\"\"Filter out any private projects.\"\"\"\n for related_obj in self.queryset.model._meta.get_all_related_objects():\n if related_obj.model == Project:\n self.queryset = self.queryset.filter(project__is_public=True)\n break\n return super(BaseRSRViewSet, self).get_queryset()\n", "path": "akvo/rest/viewsets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import DjangoObjectPermissions\n\nfrom .models import TastyTokenAuthentication\n\nfrom akvo.rsr.models import Project\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. Provides unified auth and perms settings.\n Only public projects will be shown by filtering the queryset.\n \"\"\"\n authentication_classes = (SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (DjangoObjectPermissions, )\n filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter, )\n ordering_fields = '__all__'\n", "path": "akvo/rest/viewsets.py"}]} | 658 | 167 |
gh_patches_debug_15907 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-2286 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PNG icons installed as scalable icons
The 1.1.11 pre-releases install various PNG icons in `/usr/share/icons/hicolor/scalable/apps/`, but the `scalable` hierarchy is reserved for scalable (SVG) icons.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/ui/icons.py`
Content:
```
1 # -*- python-mode -*-
2
3 ## Copyright (C) 2012-2013 Daniel Pavel
4 ##
5 ## This program is free software; you can redistribute it and/or modify
6 ## it under the terms of the GNU General Public License as published by
7 ## the Free Software Foundation; either version 2 of the License, or
8 ## (at your option) any later version.
9 ##
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
14 ##
15 ## You should have received a copy of the GNU General Public License along
16 ## with this program; if not, write to the Free Software Foundation, Inc.,
17 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
19 import logging
20
21 import solaar.gtk as gtk
22
23 from gi.repository import Gtk
24
25 logger = logging.getLogger(__name__)
26
27 #
28 #
29 #
30
31 _LARGE_SIZE = 64
32 Gtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)
33 # Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)
34
35 TRAY_INIT = 'solaar-init'
36 TRAY_OKAY = 'solaar'
37 TRAY_ATTENTION = 'solaar-attention'
38
39 _default_theme = None
40
41
42 def _init_icon_paths():
43 global _default_theme
44 if _default_theme:
45 return
46
47 _default_theme = Gtk.IconTheme.get_default()
48 if logger.isEnabledFor(logging.DEBUG):
49 logger.debug('icon theme paths: %s', _default_theme.get_search_path())
50
51 if gtk.battery_icons_style == 'symbolic':
52 global TRAY_OKAY
53 TRAY_OKAY = TRAY_INIT # use monochrome tray icon
54 if not _default_theme.has_icon('battery-good-symbolic'):
55 logger.warning('failed to detect symbolic icons')
56 gtk.battery_icons_style = 'regular'
57 if gtk.battery_icons_style == 'regular':
58 if not _default_theme.has_icon('battery-good'):
59 logger.warning('failed to detect icons')
60 gtk.battery_icons_style = 'solaar'
61
62
63 #
64 #
65 #
66
67
68 def battery(level=None, charging=False):
69 icon_name = _battery_icon_name(level, charging)
70 if not _default_theme.has_icon(icon_name):
71 logger.warning('icon %s not found in current theme', icon_name)
72 return TRAY_OKAY # use Solaar icon if battery icon not available
73 elif logger.isEnabledFor(logging.DEBUG):
74 logger.debug('battery icon for %s:%s = %s', level, charging, icon_name)
75 return icon_name
76
77
78 # return first res where val >= guard
79 # _first_res(val,((guard,res),...))
80 def _first_res(val, pairs):
81 return next((res for guard, res in pairs if val >= guard), None)
82
83
84 def _battery_icon_name(level, charging):
85 _init_icon_paths()
86
87 if level is None or level < 0:
88 return 'battery-missing' + ('-symbolic' if gtk.battery_icons_style == 'symbolic' else '')
89
90 level_name = _first_res(level, ((90, 'full'), (30, 'good'), (20, 'low'), (5, 'caution'), (0, 'empty')))
91 return 'battery-%s%s%s' % (
92 level_name, '-charging' if charging else '', '-symbolic' if gtk.battery_icons_style == 'symbolic' else ''
93 )
94
95
96 #
97 #
98 #
99
100
101 def lux(level=None):
102 if level is None or level < 0:
103 return 'light_unknown'
104 return 'light_%03d' % (20 * ((level + 50) // 100))
105
106
107 #
108 #
109 #
110
111 _ICON_SETS = {}
112
113
114 def device_icon_set(name='_', kind=None):
115 icon_set = _ICON_SETS.get(name)
116 if icon_set is None:
117 icon_set = Gtk.IconSet.new()
118 _ICON_SETS[name] = icon_set
119
120 # names of possible icons, in reverse order of likelihood
121 # the theme will hopefully pick up the most appropriate
122 names = ['preferences-desktop-peripherals']
123 if kind:
124 if str(kind) == 'numpad':
125 names += ('input-keyboard', 'input-dialpad')
126 elif str(kind) == 'touchpad':
127 names += ('input-mouse', 'input-tablet')
128 elif str(kind) == 'trackball':
129 names += ('input-mouse', )
130 elif str(kind) == 'headset':
131 names += ('audio-headphones', 'audio-headset')
132 names += ('input-' + str(kind), )
133 # names += (name.replace(' ', '-'),)
134
135 source = Gtk.IconSource.new()
136 for n in names:
137 source.set_icon_name(n)
138 icon_set.add_source(source)
139 icon_set.names = names
140
141 return icon_set
142
143
144 def device_icon_file(name, kind=None, size=_LARGE_SIZE):
145 _init_icon_paths()
146
147 icon_set = device_icon_set(name, kind)
148 assert icon_set
149 for n in reversed(icon_set.names):
150 if _default_theme.has_icon(n):
151 return _default_theme.lookup_icon(n, size, 0).get_filename()
152
153
154 def device_icon_name(name, kind=None):
155 _init_icon_paths()
156
157 icon_set = device_icon_set(name, kind)
158 assert icon_set
159 for n in reversed(icon_set.names):
160 if _default_theme.has_icon(n):
161 return n
162
163
164 def icon_file(name, size=_LARGE_SIZE):
165 _init_icon_paths()
166
167 # has_icon() somehow returned False while lookup_icon returns non-None.
168 # I guess it happens because share/solaar/icons/ has no hicolor and
169 # resolution subdirs
170 theme_icon = _default_theme.lookup_icon(name, size, 0)
171 if theme_icon:
172 file_name = theme_icon.get_filename()
173 # if logger.isEnabledFor(logging.DEBUG):
174 # logger.debug("icon %s(%d) => %s", name, size, file_name)
175 return file_name
176
177 logger.warning('icon %s(%d) not found in current theme', name, size)
178
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import subprocess
3
4 from glob import glob as _glob
5
6 try:
7 from setuptools import setup
8 except ImportError:
9 from distutils.core import setup
10
11 NAME = 'Solaar'
12
13 with open('lib/solaar/version', 'r') as vfile:
14 version = vfile.read().strip()
15
16 try: # get commit from git describe
17 commit = subprocess.check_output(['git', 'describe', '--always'], stderr=subprocess.DEVNULL).strip().decode()
18 with open('lib/solaar/commit', 'w') as vfile:
19 vfile.write(f'{commit}\n')
20 except Exception: # get commit from Ubuntu dpkg-parsechangelog
21 try:
22 commit = subprocess.check_output(['dpkg-parsechangelog', '--show-field', 'Version'],
23 stderr=subprocess.DEVNULL).strip().decode()
24 commit = commit.split('~')
25 with open('lib/solaar/commit', 'w') as vfile:
26 vfile.write(f'{commit[0]}\n')
27 except Exception as e:
28 print('Exception using dpkg-parsechangelog', e)
29
30
31 def _data_files():
32 from os.path import dirname as _dirname
33
34 yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')
35 yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/light_*.png')
36
37 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
38 yield _dirname(mo), [mo]
39
40 yield 'share/applications', ['share/applications/solaar.desktop']
41 yield 'lib/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules']
42 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']
43
44 del _dirname
45
46
47 setup(
48 name=NAME.lower(),
49 version=version,
50 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',
51 long_description='''
52 Solaar is a Linux device manager for many Logitech peripherals that connect through
53 Unifying and other receivers or via USB or Bluetooth.
54 Solaar is able to pair/unpair devices with receivers and show and modify some of the
55 modifiable features of devices.
56 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),
57 author='Daniel Pavel',
58 license='GPLv2',
59 url='http://pwr-solaar.github.io/Solaar/',
60 classifiers=[
61 'Development Status :: 4 - Beta',
62 'Environment :: X11 Applications :: GTK',
63 'Environment :: Console',
64 'Intended Audience :: End Users/Desktop',
65 'License :: DFSG approved',
66 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
67 'Natural Language :: English',
68 'Programming Language :: Python :: 3 :: Only',
69 'Operating System :: POSIX :: Linux',
70 'Topic :: Utilities',
71 ],
72 platforms=['linux'],
73
74 # sudo apt install python-gi python3-gi \
75 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1
76 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],
77 python_requires='>=3.7',
78 install_requires=[
79 'evdev (>= 1.1.2) ; platform_system=="Linux"',
80 'pyudev (>= 0.13)',
81 'PyYAML (>= 3.12)',
82 'python-xlib (>= 0.27)',
83 'psutil (>= 5.4.3)',
84 'dbus-python ; platform_system=="Linux"',
85 ],
86 extras_require={
87 'report-descriptor': ['hid-parser'],
88 'desktop-notifications': ['Notify (>= 0.7)'],
89 'git-commit': ['python-git-info'],
90 'test': ['pytest'],
91 },
92 package_dir={'': 'lib'},
93 packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
94 data_files=list(_data_files()),
95 include_package_data=True,
96 scripts=_glob('bin/*'),
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/ui/icons.py b/lib/solaar/ui/icons.py
--- a/lib/solaar/ui/icons.py
+++ b/lib/solaar/ui/icons.py
@@ -101,7 +101,7 @@
def lux(level=None):
if level is None or level < 0:
return 'light_unknown'
- return 'light_%03d' % (20 * ((level + 50) // 100))
+ return 'solaar-light_%03d' % (20 * ((level + 50) // 100))
#
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
from os.path import dirname as _dirname
yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')
- yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/light_*.png')
+ yield 'share/icons/hicolor/32x32/apps', _glob('share/solaar/icons/light_*.png')
for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
yield _dirname(mo), [mo]
| {"golden_diff": "diff --git a/lib/solaar/ui/icons.py b/lib/solaar/ui/icons.py\n--- a/lib/solaar/ui/icons.py\n+++ b/lib/solaar/ui/icons.py\n@@ -101,7 +101,7 @@\n def lux(level=None):\n if level is None or level < 0:\n return 'light_unknown'\n- return 'light_%03d' % (20 * ((level + 50) // 100))\n+ return 'solaar-light_%03d' % (20 * ((level + 50) // 100))\n \n \n #\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n from os.path import dirname as _dirname\n \n yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')\n- yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/light_*.png')\n+ yield 'share/icons/hicolor/32x32/apps', _glob('share/solaar/icons/light_*.png')\n \n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n", "issue": "PNG icons installed as scalable icons\nThe 1.1.11 pre-releases install various PNG icons in `/usr/share/icons/hicolor/scalable/apps/`, but the `scalable` hierarchy is reserved for scalable (SVG) icons.\n", "before_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport logging\n\nimport solaar.gtk as gtk\n\nfrom gi.repository import Gtk\n\nlogger = logging.getLogger(__name__)\n\n#\n#\n#\n\n_LARGE_SIZE = 64\nGtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)\n# Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)\n\nTRAY_INIT = 'solaar-init'\nTRAY_OKAY = 'solaar'\nTRAY_ATTENTION = 'solaar-attention'\n\n_default_theme = None\n\n\ndef _init_icon_paths():\n global _default_theme\n if _default_theme:\n return\n\n _default_theme = Gtk.IconTheme.get_default()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('icon theme paths: %s', _default_theme.get_search_path())\n\n if gtk.battery_icons_style == 'symbolic':\n global TRAY_OKAY\n TRAY_OKAY = TRAY_INIT # use monochrome tray icon\n if not _default_theme.has_icon('battery-good-symbolic'):\n logger.warning('failed to detect symbolic icons')\n gtk.battery_icons_style = 'regular'\n if gtk.battery_icons_style == 'regular':\n if not _default_theme.has_icon('battery-good'):\n logger.warning('failed to detect icons')\n gtk.battery_icons_style = 'solaar'\n\n\n#\n#\n#\n\n\ndef battery(level=None, charging=False):\n icon_name = _battery_icon_name(level, charging)\n if not _default_theme.has_icon(icon_name):\n logger.warning('icon %s not found in current theme', icon_name)\n return TRAY_OKAY # use Solaar icon if battery icon not available\n elif logger.isEnabledFor(logging.DEBUG):\n logger.debug('battery icon for %s:%s = %s', level, charging, icon_name)\n return icon_name\n\n\n# return first res where val >= guard\n# _first_res(val,((guard,res),...))\ndef _first_res(val, pairs):\n return 
next((res for guard, res in pairs if val >= guard), None)\n\n\ndef _battery_icon_name(level, charging):\n _init_icon_paths()\n\n if level is None or level < 0:\n return 'battery-missing' + ('-symbolic' if gtk.battery_icons_style == 'symbolic' else '')\n\n level_name = _first_res(level, ((90, 'full'), (30, 'good'), (20, 'low'), (5, 'caution'), (0, 'empty')))\n return 'battery-%s%s%s' % (\n level_name, '-charging' if charging else '', '-symbolic' if gtk.battery_icons_style == 'symbolic' else ''\n )\n\n\n#\n#\n#\n\n\ndef lux(level=None):\n if level is None or level < 0:\n return 'light_unknown'\n return 'light_%03d' % (20 * ((level + 50) // 100))\n\n\n#\n#\n#\n\n_ICON_SETS = {}\n\n\ndef device_icon_set(name='_', kind=None):\n icon_set = _ICON_SETS.get(name)\n if icon_set is None:\n icon_set = Gtk.IconSet.new()\n _ICON_SETS[name] = icon_set\n\n # names of possible icons, in reverse order of likelihood\n # the theme will hopefully pick up the most appropriate\n names = ['preferences-desktop-peripherals']\n if kind:\n if str(kind) == 'numpad':\n names += ('input-keyboard', 'input-dialpad')\n elif str(kind) == 'touchpad':\n names += ('input-mouse', 'input-tablet')\n elif str(kind) == 'trackball':\n names += ('input-mouse', )\n elif str(kind) == 'headset':\n names += ('audio-headphones', 'audio-headset')\n names += ('input-' + str(kind), )\n # names += (name.replace(' ', '-'),)\n\n source = Gtk.IconSource.new()\n for n in names:\n source.set_icon_name(n)\n icon_set.add_source(source)\n icon_set.names = names\n\n return icon_set\n\n\ndef device_icon_file(name, kind=None, size=_LARGE_SIZE):\n _init_icon_paths()\n\n icon_set = device_icon_set(name, kind)\n assert icon_set\n for n in reversed(icon_set.names):\n if _default_theme.has_icon(n):\n return _default_theme.lookup_icon(n, size, 0).get_filename()\n\n\ndef device_icon_name(name, kind=None):\n _init_icon_paths()\n\n icon_set = device_icon_set(name, kind)\n assert icon_set\n for n in reversed(icon_set.names):\n if _default_theme.has_icon(n):\n return n\n\n\ndef icon_file(name, size=_LARGE_SIZE):\n _init_icon_paths()\n\n # has_icon() somehow returned False while lookup_icon returns non-None.\n # I guess it happens because share/solaar/icons/ has no hicolor and\n # resolution subdirs\n theme_icon = _default_theme.lookup_icon(name, size, 0)\n if theme_icon:\n file_name = theme_icon.get_filename()\n # if logger.isEnabledFor(logging.DEBUG):\n # logger.debug(\"icon %s(%d) => %s\", name, size, file_name)\n return file_name\n\n logger.warning('icon %s(%d) not found in current theme', name, size)\n", "path": "lib/solaar/ui/icons.py"}, {"content": "#!/usr/bin/env python3\nimport subprocess\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nNAME = 'Solaar'\n\nwith open('lib/solaar/version', 'r') as vfile:\n version = vfile.read().strip()\n\ntry: # get commit from git describe\n commit = subprocess.check_output(['git', 'describe', '--always'], stderr=subprocess.DEVNULL).strip().decode()\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit}\\n')\nexcept Exception: # get commit from Ubuntu dpkg-parsechangelog\n try:\n commit = subprocess.check_output(['dpkg-parsechangelog', '--show-field', 'Version'],\n stderr=subprocess.DEVNULL).strip().decode()\n commit = commit.split('~')\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit[0]}\\n')\n except Exception as e:\n print('Exception using dpkg-parsechangelog', e)\n\n\ndef _data_files():\n from 
os.path import dirname as _dirname\n\n yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/light_*.png')\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'lib/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=version,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2) ; platform_system==\"Linux\"',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n 'dbus-python ; platform_system==\"Linux\"',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n 'git-commit': ['python-git-info'],\n 'test': ['pytest'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n include_package_data=True,\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nimport logging\n\nimport solaar.gtk as gtk\n\nfrom gi.repository import Gtk\n\nlogger = logging.getLogger(__name__)\n\n#\n#\n#\n\n_LARGE_SIZE = 64\nGtk.IconSize.LARGE = Gtk.icon_size_register('large', _LARGE_SIZE, _LARGE_SIZE)\n# Gtk.IconSize.XLARGE = Gtk.icon_size_register('x-large', _LARGE_SIZE * 2, _LARGE_SIZE * 2)\n\nTRAY_INIT = 'solaar-init'\nTRAY_OKAY = 'solaar'\nTRAY_ATTENTION = 'solaar-attention'\n\n_default_theme = None\n\n\ndef _init_icon_paths():\n global _default_theme\n if _default_theme:\n return\n\n _default_theme = Gtk.IconTheme.get_default()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('icon theme paths: %s', _default_theme.get_search_path())\n\n if gtk.battery_icons_style == 'symbolic':\n global TRAY_OKAY\n TRAY_OKAY = TRAY_INIT # use monochrome tray icon\n if not _default_theme.has_icon('battery-good-symbolic'):\n logger.warning('failed to detect symbolic icons')\n gtk.battery_icons_style = 'regular'\n if gtk.battery_icons_style == 'regular':\n if not _default_theme.has_icon('battery-good'):\n logger.warning('failed to detect icons')\n gtk.battery_icons_style = 'solaar'\n\n\n#\n#\n#\n\n\ndef battery(level=None, charging=False):\n icon_name = _battery_icon_name(level, charging)\n if not _default_theme.has_icon(icon_name):\n logger.warning('icon %s not found in current theme', icon_name)\n return TRAY_OKAY # use Solaar icon if battery icon not available\n elif logger.isEnabledFor(logging.DEBUG):\n logger.debug('battery icon for %s:%s = %s', level, charging, icon_name)\n return icon_name\n\n\n# return first res where val >= guard\n# _first_res(val,((guard,res),...))\ndef _first_res(val, pairs):\n return next((res for guard, res in pairs if val >= guard), None)\n\n\ndef _battery_icon_name(level, charging):\n _init_icon_paths()\n\n if level is None or level < 0:\n return 'battery-missing' + ('-symbolic' if gtk.battery_icons_style == 'symbolic' else '')\n\n level_name = _first_res(level, ((90, 'full'), (30, 'good'), (20, 'low'), (5, 'caution'), (0, 'empty')))\n return 'battery-%s%s%s' % (\n level_name, '-charging' if charging else '', '-symbolic' if gtk.battery_icons_style == 'symbolic' else ''\n )\n\n\n#\n#\n#\n\n\ndef lux(level=None):\n if level is None or level < 0:\n return 'light_unknown'\n return 'solaar-light_%03d' % (20 * ((level + 50) // 100))\n\n\n#\n#\n#\n\n_ICON_SETS = {}\n\n\ndef device_icon_set(name='_', kind=None):\n icon_set = _ICON_SETS.get(name)\n if icon_set is None:\n icon_set = Gtk.IconSet.new()\n _ICON_SETS[name] = icon_set\n\n # names of possible icons, in reverse order of likelihood\n # the theme will hopefully pick up the most appropriate\n names = ['preferences-desktop-peripherals']\n if kind:\n if str(kind) == 'numpad':\n names += ('input-keyboard', 'input-dialpad')\n elif str(kind) == 'touchpad':\n names += ('input-mouse', 'input-tablet')\n elif str(kind) == 'trackball':\n names += ('input-mouse', )\n elif str(kind) == 'headset':\n names += ('audio-headphones', 'audio-headset')\n names += ('input-' + str(kind), )\n # names += (name.replace(' ', '-'),)\n\n source = Gtk.IconSource.new()\n for n in names:\n source.set_icon_name(n)\n icon_set.add_source(source)\n icon_set.names = names\n\n return icon_set\n\n\ndef device_icon_file(name, kind=None, size=_LARGE_SIZE):\n 
_init_icon_paths()\n\n icon_set = device_icon_set(name, kind)\n assert icon_set\n for n in reversed(icon_set.names):\n if _default_theme.has_icon(n):\n return _default_theme.lookup_icon(n, size, 0).get_filename()\n\n\ndef device_icon_name(name, kind=None):\n _init_icon_paths()\n\n icon_set = device_icon_set(name, kind)\n assert icon_set\n for n in reversed(icon_set.names):\n if _default_theme.has_icon(n):\n return n\n\n\ndef icon_file(name, size=_LARGE_SIZE):\n _init_icon_paths()\n\n # has_icon() somehow returned False while lookup_icon returns non-None.\n # I guess it happens because share/solaar/icons/ has no hicolor and\n # resolution subdirs\n theme_icon = _default_theme.lookup_icon(name, size, 0)\n if theme_icon:\n file_name = theme_icon.get_filename()\n # if logger.isEnabledFor(logging.DEBUG):\n # logger.debug(\"icon %s(%d) => %s\", name, size, file_name)\n return file_name\n\n logger.warning('icon %s(%d) not found in current theme', name, size)\n", "path": "lib/solaar/ui/icons.py"}, {"content": "#!/usr/bin/env python3\nimport subprocess\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nNAME = 'Solaar'\n\nwith open('lib/solaar/version', 'r') as vfile:\n version = vfile.read().strip()\n\ntry: # get commit from git describe\n commit = subprocess.check_output(['git', 'describe', '--always'], stderr=subprocess.DEVNULL).strip().decode()\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit}\\n')\nexcept Exception: # get commit from Ubuntu dpkg-parsechangelog\n try:\n commit = subprocess.check_output(['dpkg-parsechangelog', '--show-field', 'Version'],\n stderr=subprocess.DEVNULL).strip().decode()\n commit = commit.split('~')\n with open('lib/solaar/commit', 'w') as vfile:\n vfile.write(f'{commit[0]}\\n')\n except Exception as e:\n print('Exception using dpkg-parsechangelog', e)\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/icons/hicolor/scalable/apps', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/icons/hicolor/32x32/apps', _glob('share/solaar/icons/light_*.png')\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'lib/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=version,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt 
install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2) ; platform_system==\"Linux\"',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n 'dbus-python ; platform_system==\"Linux\"',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n 'git-commit': ['python-git-info'],\n 'test': ['pytest'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n include_package_data=True,\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}]} | 3,287 | 288 |
gh_patches_debug_725 | rasdani/github-patches | git_diff | rasterio__rasterio-1477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python crashes while building overviews
After running the code below, Python crashes:
```python
import rasterio
from rasterio.enums import Resampling
factors = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
dst = rasterio.open('rasterio/tests/data/RGB.byte.tif', 'r+')
dst.build_overviews(factors, Resampling.average)
```
```
*** Error in `python': malloc(): memory corruption: 0x0000000002e0f9c0 ***
======= Backtrace: =========
/lib/x86_64-linux-gnu/libc.so.6(+0x777e5)[0x7fbe1c3fd7e5]
/lib/x86_64-linux-gnu/libc.so.6(+0x8213e)[0x7fbe1c40813e]
/lib/x86_64-linux-gnu/libc.so.6(__libc_malloc+0x54)[0x7fbe1c40a184]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLMalloc+0x20)[0x7fbe19ab2700]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLCalloc+0x1c)[0x7fbe19ab27ac]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN12GTiffDataset15IBuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x10f0)[0x7fbe19554bd0]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN11GDALDataset14BuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x38)[0x7fbe198059f8]
/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/_io.cpython-35m-x86_64-linux-gnu.so(+0x3613a)[0x7fbe0595713a]
python(PyCFunction_Call+0x77)[0x4e9ba7]
python(PyEval_EvalFrameEx+0x614)[0x5372f4]
python[0x540199]
python(PyEval_EvalCode+0x1f)[0x540e4f]
python[0x60c272]
python(PyRun_InteractiveOneObject+0x2b1)[0x46b89f]
python(PyRun_InteractiveLoopFlags+0xe8)[0x46ba48]
python[0x46cfa0]
python[0x4cf2bd]
python(main+0xe1)[0x4cfeb1]
/lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0)[0x7fbe1c3a6830]
python(_start+0x29)[0x5d6049]
```
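If the crash turns out to be triggered by decimation factors that dwarf the raster (RGB.byte.tif is only a few hundred pixels on each side), a purely defensive workaround (not a fix for the underlying memory corruption) is to drop factors larger than the smaller image dimension before calling `build_overviews`; a sketch under that assumption:
```python
import rasterio
from rasterio.enums import Resampling

factors = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]

with rasterio.open('rasterio/tests/data/RGB.byte.tif', 'r+') as dst:
    # Keep only decimation factors that still leave at least one pixel per side.
    safe_factors = [f for f in factors if f < min(dst.width, dst.height)]
    dst.build_overviews(safe_factors, Resampling.average)
```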
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/errors.py`
Content:
```
1 """Errors and Warnings."""
2
3 from click import FileError
4
5
6 class RasterioError(Exception):
7 """Root exception class"""
8
9
10 class WindowError(RasterioError):
11 """Raised when errors occur during window operations"""
12
13
14 class CRSError(ValueError):
15 """Raised when a CRS string or mapping is invalid or cannot serve
16 to define a coordinate transformation."""
17
18
19 class EnvError(RasterioError):
20 """Raised when the state of GDAL/AWS environment cannot be created
21 or modified."""
22
23
24 class DriverRegistrationError(ValueError):
25 """Raised when a format driver is requested but is not registered."""
26
27
28 class FileOverwriteError(FileError):
29 """Raised when Rasterio's CLI refuses to clobber output files."""
30
31 def __init__(self, message):
32 """Raise FileOverwriteError with message as hint."""
33 super(FileOverwriteError, self).__init__('', hint=message)
34
35
36 class RasterioIOError(IOError):
37 """Raised when a dataset cannot be opened using one of the
38 registered format drivers."""
39
40
41 class NodataShadowWarning(UserWarning):
42 """Warn that a dataset's nodata attribute is shadowing its alpha band."""
43
44 def __str__(self):
45 return ("The dataset's nodata attribute is shadowing "
46 "the alpha band. All masks will be determined "
47 "by the nodata attribute")
48
49
50 class NotGeoreferencedWarning(UserWarning):
51 """Warn that a dataset isn't georeferenced."""
52
53
54 class GDALBehaviorChangeException(RuntimeError):
55 """Raised when GDAL's behavior differs from the given arguments. For
56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users
57 expecting it to be off will be presented with a MultiPolygon when the
58 rest of their code expects a Polygon.
59
60 # Raises an exception on GDAL >= 2.2.0
61 rasterio.warp.transform_geometry(
62 src_crs, dst_crs, antimeridian_cutting=False)
63 """
64
65
66 class GDALOptionNotImplementedError(RasterioError):
67 """A dataset opening or dataset creation option can't be supported
68
69 This will be raised from Rasterio's shim modules. For example, when
70 a user passes arguments to open_dataset() that can't be evaluated
71 by GDAL 1.x.
72 """
73
74 class GDALVersionError(RasterioError):
75 """Raised if the runtime version of GDAL does not meet the required
76 version of GDAL."""
77
78
79 class WindowEvaluationError(ValueError):
80 """Raised when window evaluation fails"""
81
82
83 class RasterioDeprecationWarning(UserWarning):
84 """Rasterio module deprecations"""
85
86
87 class RasterBlockError(RasterioError):
88 """Raised when raster block access fails"""
89
90
91 class BandOverviewError(UserWarning):
92 """Raised when a band overview access fails."""
93
94
95 class WarpOptionsError(RasterioError):
96 """Raised when options for a warp operation are invalid"""
97
98
99 class UnsupportedOperation(RasterioError):
100 """Raised when reading from a file opened in 'w' mode"""
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/errors.py b/rasterio/errors.py
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -98,3 +98,7 @@
class UnsupportedOperation(RasterioError):
"""Raised when reading from a file opened in 'w' mode"""
+
+
+class OverviewCreationError(RasterioError):
+ """Raised when creation of an overview fails"""
| {"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -98,3 +98,7 @@\n \n class UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n+\n+\n+class OverviewCreationError(RasterioError):\n+ \"\"\"Raised when creation of an overview fails\"\"\"\n", "issue": "Python crashes while building overviews\nAfter performing the below code Python crashes:\r\n\r\n```python\r\nimport rasterio\r\nfrom rasterio.enums import Resampling\r\n\r\nfactors = [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]\r\ndst = rasterio.open('rasterio/tests/data/RGB.byte.tif', 'r+')\r\ndst.build_overviews(factors, Resampling.average)\r\n```\r\n\r\n```\r\n*** Error in `python': malloc(): memory corruption: 0x0000000002e0f9c0 ***\r\n======= Backtrace: =========\r\n/lib/x86_64-linux-gnu/libc.so.6(+0x777e5)[0x7fbe1c3fd7e5]\r\n/lib/x86_64-linux-gnu/libc.so.6(+0x8213e)[0x7fbe1c40813e]\r\n/lib/x86_64-linux-gnu/libc.so.6(__libc_malloc+0x54)[0x7fbe1c40a184]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLMalloc+0x20)[0x7fbe19ab2700]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(CPLCalloc+0x1c)[0x7fbe19ab27ac]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN12GTiffDataset15IBuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x10f0)[0x7fbe19554bd0]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/.libs/libgdal-acedaae2.so.20.3.1(_ZN11GDALDataset14BuildOverviewsEPKciPiiS2_PFidS1_PvES3_+0x38)[0x7fbe198059f8]\r\n/home/rykov/sandbox/env/lib/python3.5/site-packages/rasterio/_io.cpython-35m-x86_64-linux-gnu.so(+0x3613a)[0x7fbe0595713a]\r\npython(PyCFunction_Call+0x77)[0x4e9ba7]\r\npython(PyEval_EvalFrameEx+0x614)[0x5372f4]\r\npython[0x540199]\r\npython(PyEval_EvalCode+0x1f)[0x540e4f]\r\npython[0x60c272]\r\npython(PyRun_InteractiveOneObject+0x2b1)[0x46b89f]\r\npython(PyRun_InteractiveLoopFlags+0xe8)[0x46ba48]\r\npython[0x46cfa0]\r\npython[0x4cf2bd]\r\npython(main+0xe1)[0x4cfeb1]\r\n/lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0xf0)[0x7fbe1c3a6830]\r\npython(_start+0x29)[0x5d6049]\r\n```\n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing 
\"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n", "path": "rasterio/errors.py"}], "after_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. 
Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n\n\nclass WarpOptionsError(RasterioError):\n \"\"\"Raised when options for a warp operation are invalid\"\"\"\n\n\nclass UnsupportedOperation(RasterioError):\n \"\"\"Raised when reading from a file opened in 'w' mode\"\"\"\n\n\nclass OverviewCreationError(RasterioError):\n \"\"\"Raised when creation of an overview fails\"\"\"\n", "path": "rasterio/errors.py"}]} | 1,939 | 91 |
gh_patches_debug_28841 | rasdani/github-patches | git_diff | ManimCommunity__manim-2567 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Renderer only uses one rendering mode for all Scenes in a file
## Description of bug / unexpected behavior
<!-- Add a clear and concise description of the problem you encountered. -->
When running `manim animations.py -ql -a`, all of the Scenes are rendered as _either_ .png files _or_ .mp4 files.
For example, if the first Scene in 'animations.py' has no animation, then manim will decide to render that Scene to a .png.
However, if the next Scene has some animation, manim will not intelligently switch to rendering an .mp4; instead it will produce a .png containing the last frame of the intended animation.
## Expected behavior
<!-- Add a clear and concise description of what you expected to happen. -->
If there are some Scenes with animations and some still Scenes in a file, when rendering all Scenes at once, manim should decide for each Scene whether to render to a .png or to an .mp4, based on whether there is animation or not.
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
Paste your code here.
```
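A minimal file of the kind that shows the mismatch (scene and file names are hypothetical) is one still scene plus one animated scene, rendered with `manim animations.py -ql -a`:
```py
from manim import *


class StillScene(Scene):
    def construct(self):
        # No animations: rendered on its own, this scene produces a .png.
        self.add(Circle())


class AnimatedScene(Scene):
    def construct(self):
        # Contains an animation: rendered on its own, this scene produces an .mp4.
        self.play(Create(Square()))
```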
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
<details><summary>Images/GIFs</summary>
<!-- PASTE MEDIA HERE -->
</details>
## Logs
<details><summary>Terminal output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
PASTE HERE OR PROVIDE LINK TO https://pastebin.com/ OR SIMILAR
```
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
## System specifications
<details><summary>System Details</summary>
- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):
- RAM:
- Python version (`python/py/python3 --version`):
- Installed modules (provide output from `pip list`):
```
PASTE HERE
```
</details>
<details><summary>LaTeX details</summary>
+ LaTeX distribution (e.g. TeX Live 2020):
+ Installed LaTeX packages:
<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->
</details>
<details><summary>FFMPEG</summary>
Output of `ffmpeg -version`:
```
PASTE HERE
```
</details>
## Additional comments
<!-- Add further context that you think might be relevant for this issue here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/cli/render/commands.py`
Content:
```
1 """Manim's default subcommand, render.
2
3 Manim's render subcommand is accessed in the command-line interface via
4 ``manim``, but can be more explicitly accessed with ``manim render``. Here you
5 can specify options, and arguments for the render command.
6
7 """
8 from __future__ import annotations
9
10 import json
11 import sys
12 from pathlib import Path
13
14 import click
15 import cloup
16 import requests
17
18 from ... import __version__, config, console, error_console, logger
19 from ...constants import EPILOG
20 from ...utils.module_ops import scene_classes_from_file
21 from .ease_of_access_options import ease_of_access_options
22 from .global_options import global_options
23 from .output_options import output_options
24 from .render_options import render_options
25
26
27 @cloup.command(
28 context_settings=None,
29 no_args_is_help=True,
30 epilog=EPILOG,
31 )
32 @click.argument("file", type=Path, required=True)
33 @click.argument("scene_names", required=False, nargs=-1)
34 @global_options
35 @output_options
36 @render_options # type: ignore
37 @ease_of_access_options
38 def render(
39 **args,
40 ):
41 """Render SCENE(S) from the input FILE.
42
43 FILE is the file path of the script or a config file.
44
45 SCENES is an optional list of scenes in the file.
46 """
47
48 if args["use_opengl_renderer"]:
49 logger.warning(
50 "--use_opengl_renderer is deprecated, please use --renderer=opengl instead!",
51 )
52 args["renderer"] = "opengl"
53
54 if args["save_as_gif"]:
55 logger.warning("--save_as_gif is deprecated, please use --format=gif instead!")
56 args["format"] = "gif"
57
58 if args["save_pngs"]:
59 logger.warning("--save_pngs is deprecated, please use --format=png instead!")
60 args["format"] = "png"
61
62 if args["show_in_file_browser"]:
63 logger.warning(
64 "The short form of show_in_file_browser is deprecated and will be moved to support --format.",
65 )
66
67 class ClickArgs:
68 def __init__(self, args):
69 for name in args:
70 setattr(self, name, args[name])
71
72 def _get_kwargs(self):
73 return list(self.__dict__.items())
74
75 def __eq__(self, other):
76 if not isinstance(other, ClickArgs):
77 return NotImplemented
78 return vars(self) == vars(other)
79
80 def __contains__(self, key):
81 return key in self.__dict__
82
83 def __repr__(self):
84 return str(self.__dict__)
85
86 click_args = ClickArgs(args)
87 if args["jupyter"]:
88 return click_args
89
90 config.digest_args(click_args)
91 file = Path(config.input_file)
92 if config.renderer == "opengl":
93 from manim.renderer.opengl_renderer import OpenGLRenderer
94
95 try:
96 renderer = OpenGLRenderer()
97 keep_running = True
98 while keep_running:
99 for SceneClass in scene_classes_from_file(file):
100 scene = SceneClass(renderer)
101 rerun = scene.render()
102 if rerun or config["write_all"]:
103 renderer.num_plays = 0
104 continue
105 else:
106 keep_running = False
107 break
108 if config["write_all"]:
109 keep_running = False
110
111 except Exception:
112 error_console.print_exception()
113 sys.exit(1)
114 else:
115 for SceneClass in scene_classes_from_file(file):
116 try:
117 scene = SceneClass()
118 scene.render()
119 except Exception:
120 error_console.print_exception()
121 sys.exit(1)
122
123 if config.notify_outdated_version:
124 manim_info_url = "https://pypi.org/pypi/manim/json"
125 warn_prompt = "Cannot check if latest release of manim is installed"
126 req_info = {}
127
128 try:
129 req_info = requests.get(manim_info_url)
130 req_info.raise_for_status()
131
132 stable = req_info.json()["info"]["version"]
133 if stable != __version__:
134 console.print(
135 f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.",
136 )
137 console.print(
138 "You should consider upgrading via [yellow]pip install -U manim[/yellow]",
139 )
140 except requests.exceptions.HTTPError:
141 logger.debug(f"HTTP Error: {warn_prompt}")
142 except requests.exceptions.ConnectionError:
143 logger.debug(f"Connection Error: {warn_prompt}")
144 except requests.exceptions.Timeout:
145 logger.debug(f"Timed Out: {warn_prompt}")
146 except json.JSONDecodeError:
147 logger.debug(warn_prompt)
148 logger.debug(f"Error decoding JSON from {manim_info_url}")
149 except Exception:
150 logger.debug(f"Something went wrong: {warn_prompt}")
151
152 return args
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py
--- a/manim/cli/render/commands.py
+++ b/manim/cli/render/commands.py
@@ -16,6 +16,7 @@
import requests
from ... import __version__, config, console, error_console, logger
+from ..._config import tempconfig
from ...constants import EPILOG
from ...utils.module_ops import scene_classes_from_file
from .ease_of_access_options import ease_of_access_options
@@ -97,8 +98,9 @@
keep_running = True
while keep_running:
for SceneClass in scene_classes_from_file(file):
- scene = SceneClass(renderer)
- rerun = scene.render()
+ with tempconfig(config):
+ scene = SceneClass(renderer)
+ rerun = scene.render()
if rerun or config["write_all"]:
renderer.num_plays = 0
continue
@@ -114,8 +116,9 @@
else:
for SceneClass in scene_classes_from_file(file):
try:
- scene = SceneClass()
- scene.render()
+ with tempconfig(config):
+ scene = SceneClass()
+ scene.render()
except Exception:
error_console.print_exception()
sys.exit(1)
| {"golden_diff": "diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py\n--- a/manim/cli/render/commands.py\n+++ b/manim/cli/render/commands.py\n@@ -16,6 +16,7 @@\n import requests\n \n from ... import __version__, config, console, error_console, logger\n+from ..._config import tempconfig\n from ...constants import EPILOG\n from ...utils.module_ops import scene_classes_from_file\n from .ease_of_access_options import ease_of_access_options\n@@ -97,8 +98,9 @@\n keep_running = True\n while keep_running:\n for SceneClass in scene_classes_from_file(file):\n- scene = SceneClass(renderer)\n- rerun = scene.render()\n+ with tempconfig(config):\n+ scene = SceneClass(renderer)\n+ rerun = scene.render()\n if rerun or config[\"write_all\"]:\n renderer.num_plays = 0\n continue\n@@ -114,8 +116,9 @@\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n- scene = SceneClass()\n- scene.render()\n+ with tempconfig(config):\n+ scene = SceneClass()\n+ scene.render()\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n", "issue": "Renderer only uses one rendering mode for all Scenes in a file\n## Description of bug / unexpected behavior\r\n<!-- Add a clear and concise description of the problem you encountered. -->\r\n\r\nWhen running `manim animations.py -ql -a`, all of the Scenes are rendered as _either_ .png files _or_ .mp4 files.\r\nFor example, if the first Scene in 'animations.py' has no animation, then manim will decide to render that Scene to a .png.\r\nHowever, then if the next Scene has some animation, then manim will not intelligently switch to rendering .mp4, and instead will produce a .png containing the last frame of the intended animation.\r\n\r\n\r\n## Expected behavior\r\n<!-- Add a clear and concise description of what you expected to happen. -->\r\n\r\nIf there are some Scenes with animations and some still Scenes in a file, when rendering all Scenes at once, manim should decide for each Scene whether to render to a .png or to an .mp4, based on whether there is animation or not.\r\n\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nPaste your code here.\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. -->\r\n\r\n<details><summary>Images/GIFs</summary>\r\n\r\n<!-- PASTE MEDIA HERE -->\r\n\r\n</details>\r\n\r\n\r\n## Logs\r\n<details><summary>Terminal output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\nPASTE HERE OR PROVIDE LINK TO https://pastebin.com/ OR SIMILAR\r\n```\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):\r\n- RAM:\r\n- Python version (`python/py/python3 --version`):\r\n- Installed modules (provide output from `pip list`):\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n<details><summary>LaTeX details</summary>\r\n\r\n+ LaTeX distribution (e.g. 
TeX Live 2020):\r\n+ Installed LaTeX packages:\r\n<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->\r\n</details>\r\n\r\n<details><summary>FFMPEG</summary>\r\n\r\nOutput of `ffmpeg -version`:\r\n\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant for this issue here. -->\r\n\n", "before_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport sys\nfrom pathlib import Path\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, error_console, logger\nfrom ...constants import EPILOG\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=None,\n no_args_is_help=True,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options # type: ignore\n@ease_of_access_options\ndef render(\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script or a config file.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\",\n )\n args[\"renderer\"] = \"opengl\"\n\n if args[\"save_as_gif\"]:\n logger.warning(\"--save_as_gif is deprecated, please use --format=gif instead!\")\n args[\"format\"] = \"gif\"\n\n if args[\"save_pngs\"]:\n logger.warning(\"--save_pngs is deprecated, please use --format=png instead!\")\n args[\"format\"] = \"png\"\n\n if args[\"show_in_file_browser\"]:\n logger.warning(\n \"The short form of show_in_file_browser is deprecated and will be moved to support --format.\",\n )\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n config.digest_args(click_args)\n file = Path(config.input_file)\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n try:\n renderer = OpenGLRenderer()\n keep_running = True\n while keep_running:\n for SceneClass in scene_classes_from_file(file):\n scene = SceneClass(renderer)\n rerun = scene.render()\n if rerun or config[\"write_all\"]:\n renderer.num_plays = 0\n continue\n else:\n keep_running = False\n break\n if config[\"write_all\"]:\n keep_running = False\n\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n scene = SceneClass()\n scene.render()\n except Exception:\n error_console.print_exception()\n 
sys.exit(1)\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n\n stable = req_info.json()[\"info\"][\"version\"]\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\",\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\",\n )\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}], "after_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport sys\nfrom pathlib import Path\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, error_console, logger\nfrom ..._config import tempconfig\nfrom ...constants import EPILOG\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=None,\n no_args_is_help=True,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options # type: ignore\n@ease_of_access_options\ndef render(\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script or a config file.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\",\n )\n args[\"renderer\"] = \"opengl\"\n\n if args[\"save_as_gif\"]:\n logger.warning(\"--save_as_gif is deprecated, please use --format=gif instead!\")\n args[\"format\"] = \"gif\"\n\n if args[\"save_pngs\"]:\n logger.warning(\"--save_pngs is deprecated, please use --format=png instead!\")\n args[\"format\"] = \"png\"\n\n if args[\"show_in_file_browser\"]:\n logger.warning(\n \"The short form of show_in_file_browser is deprecated and will be moved to support --format.\",\n )\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n 
config.digest_args(click_args)\n file = Path(config.input_file)\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n try:\n renderer = OpenGLRenderer()\n keep_running = True\n while keep_running:\n for SceneClass in scene_classes_from_file(file):\n with tempconfig(config):\n scene = SceneClass(renderer)\n rerun = scene.render()\n if rerun or config[\"write_all\"]:\n renderer.num_plays = 0\n continue\n else:\n keep_running = False\n break\n if config[\"write_all\"]:\n keep_running = False\n\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n with tempconfig(config):\n scene = SceneClass()\n scene.render()\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n\n stable = req_info.json()[\"info\"][\"version\"]\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\",\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\",\n )\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}]} | 2,211 | 283 |
gh_patches_debug_22804 | rasdani/github-patches | git_diff | pypi__warehouse-4184 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose project_urls in JSON API
https://packaging.python.org/tutorials/distributing-packages/#project-urls
Related to #3798 / #3820
I realized project_urls is not currently exposed by the JSON API. I propose adding it.
Though the keys in the project_urls dict can be anything, they're fairly standardized, enough to be useful when querying for them over an API. For example, [Flask's API response](https://pypi.org/pypi/Flask/json) lists its home_page as https://www.palletsprojects.com/p/flask/ (not its GitHub account, which is fairly typical), and puts its GitHub link in `project_urls['Code']`, which is not currently in the API response.
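To make the proposal concrete, here is a rough sketch of how the `info` block could look once the field is exposed (the key names and URLs below are illustrative, loosely based on Flask's metadata, and `project_urls` is the proposed addition rather than current API behavior):

```python
# Hypothetical shape of the proposed "info" block in the JSON API response.
# The keys inside project_urls are whatever the package author declared in
# their packaging metadata; "Code" and "Issue tracker" are just examples.
info = {
    "home_page": "https://www.palletsprojects.com/p/flask/",
    "project_urls": {  # proposed field, not returned by the API today
        "Code": "https://github.com/pallets/flask",
        "Issue tracker": "https://github.com/pallets/flask/issues",
    },
}
```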
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/json.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.view import view_config
15 from sqlalchemy.orm import Load
16 from sqlalchemy.orm.exc import NoResultFound
17
18 from warehouse.cache.http import cache_control
19 from warehouse.cache.origin import origin_cache
20 from warehouse.packaging.models import File, Release, Project
21
22
23 # Generate appropriate CORS headers for the JSON endpoint.
24 # We want to allow Cross-Origin requests here so that users can interact
25 # with these endpoints via XHR/Fetch APIs in the browser.
26 _CORS_HEADERS = {
27 "Access-Control-Allow-Origin": "*",
28 "Access-Control-Allow-Headers": ", ".join(
29 [
30 "Content-Type",
31 "If-Match",
32 "If-Modified-Since",
33 "If-None-Match",
34 "If-Unmodified-Since",
35 ]
36 ),
37 "Access-Control-Allow-Methods": "GET",
38 "Access-Control-Max-Age": "86400", # 1 day.
39 "Access-Control-Expose-Headers": ", ".join(["X-PyPI-Last-Serial"]),
40 }
41
42
43 @view_config(
44 route_name="legacy.api.json.project",
45 context=Project,
46 renderer="json",
47 decorator=[
48 cache_control(15 * 60), # 15 minutes
49 origin_cache(
50 1 * 24 * 60 * 60, # 1 day
51 stale_while_revalidate=5 * 60, # 5 minutes
52 stale_if_error=1 * 24 * 60 * 60, # 1 day
53 ),
54 ],
55 )
56 def json_project(project, request):
57 if project.name != request.matchdict.get("name", project.name):
58 return HTTPMovedPermanently(
59 request.current_route_path(name=project.name), headers=_CORS_HEADERS
60 )
61
62 try:
63 release = (
64 request.db.query(Release)
65 .filter(Release.project == project)
66 .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())
67 .limit(1)
68 .one()
69 )
70 except NoResultFound:
71 return HTTPNotFound(headers=_CORS_HEADERS)
72
73 return json_release(release, request)
74
75
76 @view_config(
77 route_name="legacy.api.json.release",
78 context=Release,
79 renderer="json",
80 decorator=[
81 cache_control(15 * 60), # 15 minutes
82 origin_cache(
83 1 * 24 * 60 * 60, # 1 day
84 stale_while_revalidate=5 * 60, # 5 minutes
85 stale_if_error=1 * 24 * 60 * 60, # 1 day
86 ),
87 ],
88 )
89 def json_release(release, request):
90 project = release.project
91
92 if project.name != request.matchdict.get("name", project.name):
93 return HTTPMovedPermanently(
94 request.current_route_path(name=project.name), headers=_CORS_HEADERS
95 )
96
97 # Apply CORS headers.
98 request.response.headers.update(_CORS_HEADERS)
99
100 # Get the latest serial number for this project.
101 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
102
103 # Get all of the releases and files for this project.
104 release_files = (
105 request.db.query(Release, File)
106 .options(Load(Release).load_only("version"))
107 .outerjoin(File)
108 .filter(Release.project == project)
109 .order_by(Release._pypi_ordering.desc(), File.filename)
110 .all()
111 )
112
113 # Map our releases + files into a dictionary that maps each release to a
114 # list of all its files.
115 releases = {}
116 for r, file_ in release_files:
117 files = releases.setdefault(r, [])
118 if file_ is not None:
119 files.append(file_)
120
121 # Serialize our database objects to match the way that PyPI legacy
122 # presented this data.
123 releases = {
124 r.version: [
125 {
126 "filename": f.filename,
127 "packagetype": f.packagetype,
128 "python_version": f.python_version,
129 "has_sig": f.has_signature,
130 "comment_text": f.comment_text,
131 "md5_digest": f.md5_digest,
132 "digests": {"md5": f.md5_digest, "sha256": f.sha256_digest},
133 "size": f.size,
134 # TODO: Remove this once we've had a long enough time with it
135 # here to consider it no longer in use.
136 "downloads": -1,
137 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
138 "url": request.route_url("packaging.file", path=f.path),
139 }
140 for f in fs
141 ]
142 for r, fs in releases.items()
143 }
144
145 return {
146 "info": {
147 "name": project.name,
148 "version": release.version,
149 "summary": release.summary,
150 "description_content_type": release.description_content_type,
151 "description": release.description,
152 "keywords": release.keywords,
153 "license": release.license,
154 "classifiers": list(release.classifiers),
155 "author": release.author,
156 "author_email": release.author_email,
157 "maintainer": release.maintainer,
158 "maintainer_email": release.maintainer_email,
159 "requires_python": release.requires_python,
160 "platform": release.platform,
161 "downloads": {"last_day": -1, "last_week": -1, "last_month": -1},
162 "package_url": request.route_url("packaging.project", name=project.name),
163 "project_url": request.route_url("packaging.project", name=project.name),
164 "release_url": request.route_url(
165 "packaging.release", name=project.name, version=release.version
166 ),
167 "requires_dist": (
168 list(release.requires_dist) if release.requires_dist else None
169 ),
170 "docs_url": project.documentation_url,
171 "bugtrack_url": project.bugtrack_url,
172 "home_page": release.home_page,
173 "download_url": release.download_url,
174 },
175 "urls": releases[release.version],
176 "releases": releases,
177 "last_serial": project.last_serial,
178 }
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -10,6 +10,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from collections import OrderedDict
+
from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
from pyramid.view import view_config
from sqlalchemy.orm import Load
@@ -161,6 +163,7 @@
"downloads": {"last_day": -1, "last_week": -1, "last_month": -1},
"package_url": request.route_url("packaging.project", name=project.name),
"project_url": request.route_url("packaging.project", name=project.name),
+ "project_urls": OrderedDict(release.urls) if release.urls else None,
"release_url": request.route_url(
"packaging.release", name=project.name, version=release.version
),
| {"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -10,6 +10,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from collections import OrderedDict\n+\n from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\n from pyramid.view import view_config\n from sqlalchemy.orm import Load\n@@ -161,6 +163,7 @@\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n+ \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n", "issue": "Expose project_urls in JSON API\nhttps://packaging.python.org/tutorials/distributing-packages/#project-urls\r\n\r\nRelated to #3798 / #3820\r\n\r\nI realized project_urls is not currently exposed by the JSON API. I propose adding it.\r\n\r\nThough the keys in the project_urls dict can be anything, they're fairly standardized, enough to be useful when querying for them over and API. For example, [Flask's API response](https://pypi.org/pypi/Flask/json) lists its home_page as https://www.palletsprojects.com/p/flask/ (not it's github account which is fairly typical), and puts it's GitHub link in `project_urls['Code']`, which is not currently in the API response.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Release, Project\n\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n 
return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(Load(Release).load_only(\"version\"))\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description_content_type,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n 
\"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n", "path": "warehouse/legacy/api/json.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Release, Project\n\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(Load(Release).load_only(\"version\"))\n .outerjoin(File)\n .filter(Release.project == 
project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description_content_type,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n", "path": "warehouse/legacy/api/json.py"}]} | 2,332 | 224 |
gh_patches_debug_8018 | rasdani/github-patches | git_diff | kymatio__kymatio-289 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC 3D benchmarks
Currently none are listed in the user's guide. We should probably include something here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kymatio/scattering2d/scattering2d.py`
Content:
```
1 # Authors: Edouard Oyallon
2 # Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna
3
4
5 __all__ = ['Scattering2D']
6
7 import torch
8 from .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad
9 from .filter_bank import filter_bank
10 from .utils import compute_padding
11
12
13 class Scattering2D(object):
14 """Main module implementing the scattering transform in 2D.
15 The scattering transform computes two wavelet transform followed
16 by modulus non-linearity.
17 It can be summarized as::
18
19 S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]
20
21 where::
22
23 S_J^0 x = x * phi_J
24 S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda
25 S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}
26
27 where * denotes the convolution (in space), phi_J is a low pass
28 filter, psi^1_lambda is a family of band pass
29 filters and psi^2_mu is another family of band pass filters.
30 Only Morlet filters are used in this implementation.
31 Convolutions are efficiently performed in the Fourier domain
32 with this implementation.
33
34 Example
35 -------
36 # 1) Define a Scattering object as:
37 s = Scattering2D(J, M, N)
38 # where (M, N) are the image sizes and 2**J the scale of the scattering
39 # 2) Forward on an input Variable x of shape B x 1 x M x N,
40 # where B is the batch size.
41 result_s = s(x)
42
43 Parameters
44 ----------
45 J : int
46 logscale of the scattering
47 shape : tuple of int
48 spatial support (M, N) of the input
49 L : int, optional
50 number of angles used for the wavelet transform
51 max_order : int, optional
52 The maximum order of scattering coefficients to compute. Must be either
53 `1` or `2`. Defaults to `2`.
54 pre_pad : boolean, optional
55 controls the padding: if set to False, a symmetric padding is applied
56 on the signal. If set to true, the software will assume the signal was
57 padded externally.
58
59 Attributes
60 ----------
61 J : int
62 logscale of the scattering
63 shape : tuple of int
64 spatial support (M, N) of the input
65 L : int, optional
66 number of angles used for the wavelet transform
67 max_order : int, optional
68 The maximum order of scattering coefficients to compute.
69 Must be either equal to `1` or `2`. Defaults to `2`.
70 pre_pad : boolean
71 controls the padding
72 Psi : dictionary
73 containing the wavelets filters at all resolutions. See
74 filter_bank.filter_bank for an exact description.
75 Phi : dictionary
76 containing the low-pass filters at all resolutions. See
77 filter_bank.filter_bank for an exact description.
78 M_padded, N_padded : int
79 spatial support of the padded input
80
81 Notes
82 -----
83 The design of the filters is optimized for the value L = 8
84
85 pre_pad is particularly useful when doing crops of a bigger
86 image because the padding is then extremely accurate. Defaults
87 to False.
88
89 """
90 def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):
91 self.J, self.L = J, L
92 self.pre_pad = pre_pad
93 self.max_order = max_order
94 self.shape = shape
95
96 self.build()
97
98 def build(self):
99 self.M, self.N = self.shape
100 self.modulus = Modulus()
101 self.pad = Pad(2**self.J, pre_pad = self.pre_pad)
102 self.subsample_fourier = SubsampleFourier()
103 # Create the filters
104 self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)
105 filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)
106 self.Psi = filters['psi']
107 self.Phi = [filters['phi'][j] for j in range(self.J)]
108
109 def _type(self, _type):
110 for key, item in enumerate(self.Psi):
111 for key2, item2 in self.Psi[key].items():
112 if torch.is_tensor(item2):
113 self.Psi[key][key2] = item2.type(_type)
114 self.Phi = [v.type(_type) for v in self.Phi]
115 self.pad.padding_module.type(_type)
116 return self
117
118 def cuda(self):
119 """
120 Moves the parameters of the scattering to the GPU
121 """
122 return self._type(torch.cuda.FloatTensor)
123
124 def cpu(self):
125 """
126 Moves the parameters of the scattering to the CPU
127 """
128 return self._type(torch.FloatTensor)
129
130 def forward(self, input):
131 """Forward pass of the scattering.
132
133 Parameters
134 ----------
135 input : tensor
136 tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.
137 :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.
138
139 Returns
140 -------
141 S : tensor
142 scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds
143 to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.
144
145 """
146 if not torch.is_tensor(input):
147 raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))
148
149 if len(input.shape) < 2:
150 raise (RuntimeError('Input tensor must have at least two '
151 'dimensions'))
152
153 if (not input.is_contiguous()):
154 raise (RuntimeError('Tensor must be contiguous!'))
155
156 if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):
157 raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))
158
159 if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):
160 raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' % (self.M_padded, self.N_padded)))
161
162 batch_shape = input.shape[:-2]
163 signal_shape = input.shape[-2:]
164
165 input = input.reshape((-1, 1) + signal_shape)
166
167 J = self.J
168 phi = self.Phi
169 psi = self.Psi
170
171 subsample_fourier = self.subsample_fourier
172 modulus = self.modulus
173 pad = self.pad
174 order0_size = 1
175 order1_size = self.L * J
176 order2_size = self.L ** 2 * J * (J - 1) // 2
177 output_size = order0_size + order1_size
178
179 if self.max_order == 2:
180 output_size += order2_size
181
182 S = input.new(input.size(0),
183 input.size(1),
184 output_size,
185 self.M_padded//(2**J)-2,
186 self.N_padded//(2**J)-2)
187 U_r = pad(input)
188 U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c
189
190 # First low pass filter
191 U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)
192
193 U_J_r = fft(U_1_c, 'C2R')
194
195 S[..., 0, :, :] = unpad(U_J_r)
196 n_order1 = 1
197 n_order2 = 1 + order1_size
198
199 for n1 in range(len(psi)):
200 j1 = psi[n1]['j']
201 U_1_c = cdgmm(U_0_c, psi[n1][0])
202 if(j1 > 0):
203 U_1_c = subsample_fourier(U_1_c, k=2 ** j1)
204 U_1_c = fft(U_1_c, 'C2C', inverse=True)
205 U_1_c = fft(modulus(U_1_c), 'C2C')
206
207 # Second low pass filter
208 U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))
209 U_J_r = fft(U_2_c, 'C2R')
210 S[..., n_order1, :, :] = unpad(U_J_r)
211 n_order1 += 1
212
213 if self.max_order == 2:
214 for n2 in range(len(psi)):
215 j2 = psi[n2]['j']
216 if(j1 < j2):
217 U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))
218 U_2_c = fft(U_2_c, 'C2C', inverse=True)
219 U_2_c = fft(modulus(U_2_c), 'C2C')
220
221 # Third low pass filter
222 U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))
223 U_J_r = fft(U_2_c, 'C2R')
224
225 S[..., n_order2, :, :] = unpad(U_J_r)
226 n_order2 += 1
227
228 scattering_shape = S.shape[-3:]
229 S = S.reshape(batch_shape + scattering_shape)
230
231 return S
232
233 def __call__(self, input):
234 return self.forward(input)
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kymatio/scattering2d/scattering2d.py b/kymatio/scattering2d/scattering2d.py
--- a/kymatio/scattering2d/scattering2d.py
+++ b/kymatio/scattering2d/scattering2d.py
@@ -34,9 +34,9 @@
Example
-------
# 1) Define a Scattering object as:
- s = Scattering2D(J, M, N)
+ s = Scattering2D(J, shape=(M, N))
# where (M, N) are the image sizes and 2**J the scale of the scattering
- # 2) Forward on an input Variable x of shape B x 1 x M x N,
+ # 2) Forward on an input Tensor x of shape B x M x N,
# where B is the batch size.
result_s = s(x)
| {"golden_diff": "diff --git a/kymatio/scattering2d/scattering2d.py b/kymatio/scattering2d/scattering2d.py\n--- a/kymatio/scattering2d/scattering2d.py\n+++ b/kymatio/scattering2d/scattering2d.py\n@@ -34,9 +34,9 @@\n Example\n -------\n # 1) Define a Scattering object as:\n- s = Scattering2D(J, M, N)\n+ s = Scattering2D(J, shape=(M, N))\n # where (M, N) are the image sizes and 2**J the scale of the scattering\n- # 2) Forward on an input Variable x of shape B x 1 x M x N,\n+ # 2) Forward on an input Tensor x of shape B x M x N,\n # where B is the batch size.\n result_s = s(x)\n", "issue": "DOC 3D benchmarks\nCurrently none are listed in the user's guide. We should probably include something here.\n", "before_files": [{"content": "# Authors: Edouard Oyallon\n# Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna\n\n\n__all__ = ['Scattering2D']\n\nimport torch\nfrom .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad\nfrom .filter_bank import filter_bank\nfrom .utils import compute_padding\n\n\nclass Scattering2D(object):\n \"\"\"Main module implementing the scattering transform in 2D.\n The scattering transform computes two wavelet transform followed\n by modulus non-linearity.\n It can be summarized as::\n\n S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]\n\n where::\n\n S_J^0 x = x * phi_J\n S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda\n S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}\n\n where * denotes the convolution (in space), phi_J is a low pass\n filter, psi^1_lambda is a family of band pass\n filters and psi^2_mu is another family of band pass filters.\n Only Morlet filters are used in this implementation.\n Convolutions are efficiently performed in the Fourier domain\n with this implementation.\n\n Example\n -------\n # 1) Define a Scattering object as:\n s = Scattering2D(J, M, N)\n # where (M, N) are the image sizes and 2**J the scale of the scattering\n # 2) Forward on an input Variable x of shape B x 1 x M x N,\n # where B is the batch size.\n result_s = s(x)\n\n Parameters\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute. Must be either\n `1` or `2`. Defaults to `2`.\n pre_pad : boolean, optional\n controls the padding: if set to False, a symmetric padding is applied\n on the signal. If set to true, the software will assume the signal was\n padded externally.\n\n Attributes\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute.\n Must be either equal to `1` or `2`. Defaults to `2`.\n pre_pad : boolean\n controls the padding\n Psi : dictionary\n containing the wavelets filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n Phi : dictionary\n containing the low-pass filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n M_padded, N_padded : int\n spatial support of the padded input\n\n Notes\n -----\n The design of the filters is optimized for the value L = 8\n\n pre_pad is particularly useful when doing crops of a bigger\n image because the padding is then extremely accurate. 
Defaults\n to False.\n\n \"\"\"\n def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):\n self.J, self.L = J, L\n self.pre_pad = pre_pad\n self.max_order = max_order\n self.shape = shape\n\n self.build()\n\n def build(self):\n self.M, self.N = self.shape\n self.modulus = Modulus()\n self.pad = Pad(2**self.J, pre_pad = self.pre_pad)\n self.subsample_fourier = SubsampleFourier()\n # Create the filters\n self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)\n filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)\n self.Psi = filters['psi']\n self.Phi = [filters['phi'][j] for j in range(self.J)]\n\n def _type(self, _type):\n for key, item in enumerate(self.Psi):\n for key2, item2 in self.Psi[key].items():\n if torch.is_tensor(item2):\n self.Psi[key][key2] = item2.type(_type)\n self.Phi = [v.type(_type) for v in self.Phi]\n self.pad.padding_module.type(_type)\n return self\n\n def cuda(self):\n \"\"\"\n Moves the parameters of the scattering to the GPU\n \"\"\"\n return self._type(torch.cuda.FloatTensor)\n\n def cpu(self):\n \"\"\"\n Moves the parameters of the scattering to the CPU\n \"\"\"\n return self._type(torch.FloatTensor)\n\n def forward(self, input):\n \"\"\"Forward pass of the scattering.\n\n Parameters\n ----------\n input : tensor\n tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.\n :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.\n\n Returns\n -------\n S : tensor\n scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds\n to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.\n\n \"\"\"\n if not torch.is_tensor(input):\n raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))\n\n if len(input.shape) < 2:\n raise (RuntimeError('Input tensor must have at least two '\n 'dimensions'))\n\n if (not input.is_contiguous()):\n raise (RuntimeError('Tensor must be contiguous!'))\n\n if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):\n raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))\n\n if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):\n raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' 
% (self.M_padded, self.N_padded)))\n\n batch_shape = input.shape[:-2]\n signal_shape = input.shape[-2:]\n\n input = input.reshape((-1, 1) + signal_shape)\n\n J = self.J\n phi = self.Phi\n psi = self.Psi\n\n subsample_fourier = self.subsample_fourier\n modulus = self.modulus\n pad = self.pad\n order0_size = 1\n order1_size = self.L * J\n order2_size = self.L ** 2 * J * (J - 1) // 2\n output_size = order0_size + order1_size\n\n if self.max_order == 2:\n output_size += order2_size\n\n S = input.new(input.size(0),\n input.size(1),\n output_size,\n self.M_padded//(2**J)-2,\n self.N_padded//(2**J)-2)\n U_r = pad(input)\n U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c\n\n # First low pass filter\n U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)\n\n U_J_r = fft(U_1_c, 'C2R')\n\n S[..., 0, :, :] = unpad(U_J_r)\n n_order1 = 1\n n_order2 = 1 + order1_size\n\n for n1 in range(len(psi)):\n j1 = psi[n1]['j']\n U_1_c = cdgmm(U_0_c, psi[n1][0])\n if(j1 > 0):\n U_1_c = subsample_fourier(U_1_c, k=2 ** j1)\n U_1_c = fft(U_1_c, 'C2C', inverse=True)\n U_1_c = fft(modulus(U_1_c), 'C2C')\n\n # Second low pass filter\n U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))\n U_J_r = fft(U_2_c, 'C2R')\n S[..., n_order1, :, :] = unpad(U_J_r)\n n_order1 += 1\n\n if self.max_order == 2:\n for n2 in range(len(psi)):\n j2 = psi[n2]['j']\n if(j1 < j2):\n U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))\n U_2_c = fft(U_2_c, 'C2C', inverse=True)\n U_2_c = fft(modulus(U_2_c), 'C2C')\n \n # Third low pass filter\n U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))\n U_J_r = fft(U_2_c, 'C2R')\n \n S[..., n_order2, :, :] = unpad(U_J_r)\n n_order2 += 1\n\n scattering_shape = S.shape[-3:]\n S = S.reshape(batch_shape + scattering_shape)\n\n return S\n\n def __call__(self, input):\n return self.forward(input)\n", "path": "kymatio/scattering2d/scattering2d.py"}], "after_files": [{"content": "# Authors: Edouard Oyallon\n# Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna\n\n\n__all__ = ['Scattering2D']\n\nimport torch\nfrom .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad\nfrom .filter_bank import filter_bank\nfrom .utils import compute_padding\n\n\nclass Scattering2D(object):\n \"\"\"Main module implementing the scattering transform in 2D.\n The scattering transform computes two wavelet transform followed\n by modulus non-linearity.\n It can be summarized as::\n\n S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]\n\n where::\n\n S_J^0 x = x * phi_J\n S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda\n S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}\n\n where * denotes the convolution (in space), phi_J is a low pass\n filter, psi^1_lambda is a family of band pass\n filters and psi^2_mu is another family of band pass filters.\n Only Morlet filters are used in this implementation.\n Convolutions are efficiently performed in the Fourier domain\n with this implementation.\n\n Example\n -------\n # 1) Define a Scattering object as:\n s = Scattering2D(J, shape=(M, N))\n # where (M, N) are the image sizes and 2**J the scale of the scattering\n # 2) Forward on an input Tensor x of shape B x M x N,\n # where B is the batch size.\n result_s = s(x)\n\n Parameters\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute. 
Must be either\n `1` or `2`. Defaults to `2`.\n pre_pad : boolean, optional\n controls the padding: if set to False, a symmetric padding is applied\n on the signal. If set to true, the software will assume the signal was\n padded externally.\n\n Attributes\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute.\n Must be either equal to `1` or `2`. Defaults to `2`.\n pre_pad : boolean\n controls the padding\n Psi : dictionary\n containing the wavelets filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n Phi : dictionary\n containing the low-pass filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n M_padded, N_padded : int\n spatial support of the padded input\n\n Notes\n -----\n The design of the filters is optimized for the value L = 8\n\n pre_pad is particularly useful when doing crops of a bigger\n image because the padding is then extremely accurate. Defaults\n to False.\n\n \"\"\"\n def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):\n self.J, self.L = J, L\n self.pre_pad = pre_pad\n self.max_order = max_order\n self.shape = shape\n\n self.build()\n\n def build(self):\n self.M, self.N = self.shape\n self.modulus = Modulus()\n self.pad = Pad(2**self.J, pre_pad = self.pre_pad)\n self.subsample_fourier = SubsampleFourier()\n # Create the filters\n self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)\n filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)\n self.Psi = filters['psi']\n self.Phi = [filters['phi'][j] for j in range(self.J)]\n\n def _type(self, _type):\n for key, item in enumerate(self.Psi):\n for key2, item2 in self.Psi[key].items():\n if torch.is_tensor(item2):\n self.Psi[key][key2] = item2.type(_type)\n self.Phi = [v.type(_type) for v in self.Phi]\n self.pad.padding_module.type(_type)\n return self\n\n def cuda(self):\n \"\"\"\n Moves the parameters of the scattering to the GPU\n \"\"\"\n return self._type(torch.cuda.FloatTensor)\n\n def cpu(self):\n \"\"\"\n Moves the parameters of the scattering to the CPU\n \"\"\"\n return self._type(torch.FloatTensor)\n\n def forward(self, input):\n \"\"\"Forward pass of the scattering.\n\n Parameters\n ----------\n input : tensor\n tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.\n :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.\n\n Returns\n -------\n S : tensor\n scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds\n to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.\n\n \"\"\"\n if not torch.is_tensor(input):\n raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))\n\n if len(input.shape) < 2:\n raise (RuntimeError('Input tensor must have at least two '\n 'dimensions'))\n\n if (not input.is_contiguous()):\n raise (RuntimeError('Tensor must be contiguous!'))\n\n if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):\n raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))\n\n if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):\n raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' 
% (self.M_padded, self.N_padded)))\n\n batch_shape = input.shape[:-2]\n signal_shape = input.shape[-2:]\n\n input = input.reshape((-1, 1) + signal_shape)\n\n J = self.J\n phi = self.Phi\n psi = self.Psi\n\n subsample_fourier = self.subsample_fourier\n modulus = self.modulus\n pad = self.pad\n order0_size = 1\n order1_size = self.L * J\n order2_size = self.L ** 2 * J * (J - 1) // 2\n output_size = order0_size + order1_size\n\n if self.max_order == 2:\n output_size += order2_size\n\n S = input.new(input.size(0),\n input.size(1),\n output_size,\n self.M_padded//(2**J)-2,\n self.N_padded//(2**J)-2)\n U_r = pad(input)\n U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c\n\n # First low pass filter\n U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)\n\n U_J_r = fft(U_1_c, 'C2R')\n\n S[..., 0, :, :] = unpad(U_J_r)\n n_order1 = 1\n n_order2 = 1 + order1_size\n\n for n1 in range(len(psi)):\n j1 = psi[n1]['j']\n U_1_c = cdgmm(U_0_c, psi[n1][0])\n if(j1 > 0):\n U_1_c = subsample_fourier(U_1_c, k=2 ** j1)\n U_1_c = fft(U_1_c, 'C2C', inverse=True)\n U_1_c = fft(modulus(U_1_c), 'C2C')\n\n # Second low pass filter\n U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))\n U_J_r = fft(U_2_c, 'C2R')\n S[..., n_order1, :, :] = unpad(U_J_r)\n n_order1 += 1\n\n if self.max_order == 2:\n for n2 in range(len(psi)):\n j2 = psi[n2]['j']\n if(j1 < j2):\n U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))\n U_2_c = fft(U_2_c, 'C2C', inverse=True)\n U_2_c = fft(modulus(U_2_c), 'C2C')\n \n # Third low pass filter\n U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))\n U_J_r = fft(U_2_c, 'C2R')\n \n S[..., n_order2, :, :] = unpad(U_J_r)\n n_order2 += 1\n\n scattering_shape = S.shape[-3:]\n S = S.reshape(batch_shape + scattering_shape)\n\n return S\n\n def __call__(self, input):\n return self.forward(input)\n", "path": "kymatio/scattering2d/scattering2d.py"}]} | 3,090 | 203 |
gh_patches_debug_31934 | rasdani/github-patches | git_diff | mlflow__mlflow-1800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] HdfsArtifactRepository list_artifacts recursively lists all items under directory
Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)
for information on what types of issues we address.
For help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).
Please do not delete this template unless you are sure your issue is outside its scope.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Centos 7
- **MLflow installed from (source or binary)**: binary
- **MLflow version (run ``mlflow --version``)**: 1.1.0
- **Python version**: 3.6.8
- **npm version, if running the dev UI**: N/A
- **Exact command to reproduce**: mlflow artifacts list -r <run id for artifacts stored on hdfs>
### Describe the problem
list_artifacts of an artifact repository is expected to only list the files directly under the provided path (see https://github.com/mlflow/mlflow/blob/4b1868719837d1844f19b6242643222549ee2794/mlflow/store/cli.py#L74 ). HdfsArtifactRepository walks all files under the given path and returns them (see https://github.com/mlflow/mlflow/blob/4b1868719837d1844f19b6242643222549ee2794/mlflow/store/hdfs_artifact_repo.py#L89 ).
This behavior breaks the MLflow server, as it expects the behavior specified in the CLI file.
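As a rough, non-authoritative sketch of the expected non-recursive behavior (this is not the actual patch; it assumes pyarrow's `HadoopFileSystem.ls(path, detail=True)` returns dicts with `name`, `kind` and `size` entries):

```python
from mlflow.entities import FileInfo


def _list_direct_children(hdfs, hdfs_base_path):
    """List only the direct children of hdfs_base_path, without recursing.

    Assumes the pyarrow HadoopFileSystem ls(path, detail=True) API; the
    returned names may be absolute paths or full URIs, so real code would
    need to normalize them before stripping the base-path prefix.
    """
    if not hdfs.exists(hdfs_base_path):
        return []
    base_len = len(hdfs_base_path) + 1
    infos = []
    for entry in hdfs.ls(hdfs_base_path, detail=True):
        name = entry.get("name", "")
        is_dir = entry.get("kind") == "directory"
        size = entry.get("size")
        # Paths in FileInfo are kept relative to the artifact root.
        infos.append(FileInfo(name[base_len:], is_dir, size))
    return sorted(infos, key=lambda f: f.path)
```

The key difference from the current implementation is a single-level listing call instead of `walk`, so sub-directories are reported as directories instead of having their contents flattened into the result.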
### Code to reproduce issue
Provide a reproducible test case that is the bare minimum necessary to generate the problem.
### Other info / logs
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks,
please include the full traceback. Large logs and files should be attached.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/hdfs_artifact_repo.py`
Content:
```
1 import os
2 import posixpath
3 import tempfile
4 from contextlib import contextmanager
5
6 from six.moves import urllib
7
8 from mlflow.entities import FileInfo
9 from mlflow.exceptions import MlflowException
10 from mlflow.store.artifact_repo import ArtifactRepository
11 from mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path
12
13
14 class HdfsArtifactRepository(ArtifactRepository):
15 """
16 Stores artifacts on HDFS.
17
18 This repository is used with URIs of the form ``hdfs:/<path>``. The repository can only be used
19 together with the RestStore.
20 """
21
22 def __init__(self, artifact_uri):
23 self.host, self.port, self.path = _resolve_connection_params(artifact_uri)
24 super(HdfsArtifactRepository, self).__init__(artifact_uri)
25
26 def log_artifact(self, local_file, artifact_path=None):
27 """
28 Log artifact in hdfs.
29 :param local_file: source file path
30 :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path
31 """
32 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
33
34 with hdfs_system(host=self.host, port=self.port) as hdfs:
35 _, file_name = os.path.split(local_file)
36 destination = posixpath.join(hdfs_base_path, file_name)
37 with hdfs.open(destination, 'wb') as output:
38 output.write(open(local_file, "rb").read())
39
40 def log_artifacts(self, local_dir, artifact_path=None):
41 """
42 Log artifacts in hdfs.
43 Missing remote sub-directories will be created if needed.
44 :param local_dir: source dir path
45 :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path
46 """
47 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
48
49 with hdfs_system(host=self.host, port=self.port) as hdfs:
50
51 if not hdfs.exists(hdfs_base_path):
52 hdfs.mkdir(hdfs_base_path)
53
54 for subdir_path, _, files in os.walk(local_dir):
55
56 relative_path = _relative_path_local(local_dir, subdir_path)
57
58 hdfs_subdir_path = posixpath.join(hdfs_base_path, relative_path) \
59 if relative_path else hdfs_base_path
60
61 if not hdfs.exists(hdfs_subdir_path):
62 hdfs.mkdir(hdfs_subdir_path)
63
64 for each_file in files:
65 source = os.path.join(subdir_path, each_file)
66 destination = posixpath.join(hdfs_subdir_path, each_file)
67 with hdfs.open(destination, 'wb') as output_stream:
68 output_stream.write(open(source, "rb").read())
69
70 def list_artifacts(self, path=None):
71 """
72 Lists files and directories under artifacts directory for the current run_id.
73 (self.path contains the base path - hdfs:/some/path/run_id/artifacts)
74
75 :param path: Relative source path. Possible subdirectory existing under
76 hdfs:/some/path/run_id/artifacts
77 :return: List of files and directories under given path -
78 example:
79 ['conda.yaml', 'MLmodel', 'model.pkl']
80 """
81 hdfs_base_path = _resolve_base_path(self.path, path)
82 base_path_len = len(hdfs_base_path) + 1
83
84 with hdfs_system(host=self.host, port=self.port) as hdfs:
85 paths = []
86 for path, is_dir, size in self._walk_path(hdfs, hdfs_base_path):
87 paths.append(FileInfo(path[base_path_len:], is_dir, size))
88 return sorted(paths, key=lambda f: paths)
89
90 def _walk_path(self, hdfs, hdfs_path):
91 if hdfs.exists(hdfs_path):
92 if hdfs.isdir(hdfs_path):
93 for subdir, _, files in hdfs.walk(hdfs_path):
94 if subdir != hdfs_path:
95 yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get("size")
96 for f in files:
97 file_path = posixpath.join(subdir, f)
98 yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get("size")
99 else:
100 yield hdfs_path, False, hdfs.info(hdfs_path).get("size")
101
102 def download_artifacts(self, artifact_path, dst_path=None):
103 """
104 Download an artifact file or directory to a local directory/file if applicable, and
105 return a local path for it.
106 The caller is responsible for managing the lifecycle of the downloaded artifacts.
107
108 (self.path contains the base path - hdfs:/some/path/run_id/artifacts)
109
110 :param artifact_path: Relative source path to the desired artifacts file or directory.
111 :param dst_path: Absolute path of the local filesystem destination directory to which
112 to download the specified artifacts. This directory must already
113 exist. If unspecified, the artifacts will be downloaded to a new,
114 uniquely-named
115 directory on the local filesystem.
116
117 :return: Absolute path of the local filesystem location containing the downloaded
118 artifacts - file/directory.
119 """
120
121 hdfs_base_path = _resolve_base_path(self.path, artifact_path)
122 local_dir = _tmp_dir(dst_path)
123
124 with hdfs_system(host=self.host, port=self.port) as hdfs:
125
126 if not hdfs.isdir(hdfs_base_path):
127 local_path = os.path.join(local_dir, os.path.normpath(artifact_path))
128 _download_hdfs_file(hdfs, hdfs_base_path, local_path)
129 return local_path
130
131 for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path):
132
133 relative_path = _relative_path_remote(hdfs_base_path, path)
134 local_path = os.path.join(local_dir, relative_path) \
135 if relative_path else local_dir
136
137 if is_dir:
138 mkdir(local_path)
139 else:
140 _download_hdfs_file(hdfs, path, local_path)
141 return local_dir
142
143 def _download_file(self, remote_file_path, local_path):
144 raise MlflowException('This is not implemented. Should never be called.')
145
146
147 @contextmanager
148 def hdfs_system(host, port):
149 """
150 hdfs system context - Attempt to establish the connection to hdfs
151 and yields HadoopFileSystem
152
153 :param host: hostname or when relaying on the core-site.xml config use 'default'
154 :param port: port or when relaying on the core-site.xml config use 0
155 """
156 import pyarrow as pa
157
158 driver = os.getenv('MLFLOW_HDFS_DRIVER') or 'libhdfs'
159 kerb_ticket = os.getenv('MLFLOW_KERBEROS_TICKET_CACHE')
160 kerberos_user = os.getenv('MLFLOW_KERBEROS_USER')
161 extra_conf = _parse_extra_conf(os.getenv('MLFLOW_PYARROW_EXTRA_CONF'))
162
163 connected = pa.hdfs.connect(host=host or 'default',
164 port=port or 0,
165 user=kerberos_user,
166 driver=driver,
167 kerb_ticket=kerb_ticket,
168 extra_conf=extra_conf)
169 yield connected
170 connected.close()
171
172
173 def _resolve_connection_params(artifact_uri):
174 parsed = urllib.parse.urlparse(artifact_uri)
175 return parsed.hostname, parsed.port, parsed.path
176
177
178 def _resolve_base_path(path, artifact_path):
179 if path == artifact_path:
180 return path
181 if artifact_path:
182 return posixpath.join(path, artifact_path)
183 return path
184
185
186 def _relative_path(base_dir, subdir_path, path_module):
187 relative_path = path_module.relpath(subdir_path, base_dir)
188 return relative_path if relative_path is not '.' else None
189
190
191 def _relative_path_local(base_dir, subdir_path):
192 rel_path = _relative_path(base_dir, subdir_path, os.path)
193 return relative_path_to_artifact_path(rel_path) if rel_path is not None else None
194
195
196 def _relative_path_remote(base_dir, subdir_path):
197 return _relative_path(base_dir, subdir_path, posixpath)
198
199
200 def _tmp_dir(local_path):
201 return os.path.abspath(tempfile.mkdtemp(dir=local_path))
202
203
204 def _download_hdfs_file(hdfs, remote_file_path, local_file_path):
205 with open(local_file_path, 'wb') as f:
206 f.write(hdfs.open(remote_file_path, 'rb').read())
207
208
209 def _parse_extra_conf(extra_conf):
210 if extra_conf:
211 def as_pair(config):
212 key, val = config.split('=')
213 return key, val
214
215 list_of_key_val = [as_pair(conf) for conf in extra_conf.split(',')]
216 return dict(list_of_key_val)
217 return None
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/store/hdfs_artifact_repo.py b/mlflow/store/hdfs_artifact_repo.py
--- a/mlflow/store/hdfs_artifact_repo.py
+++ b/mlflow/store/hdfs_artifact_repo.py
@@ -74,17 +74,21 @@
:param path: Relative source path. Possible subdirectory existing under
hdfs:/some/path/run_id/artifacts
- :return: List of files and directories under given path -
- example:
- ['conda.yaml', 'MLmodel', 'model.pkl']
+ :return: List of FileInfos under given path
"""
hdfs_base_path = _resolve_base_path(self.path, path)
- base_path_len = len(hdfs_base_path) + 1
with hdfs_system(host=self.host, port=self.port) as hdfs:
paths = []
- for path, is_dir, size in self._walk_path(hdfs, hdfs_base_path):
- paths.append(FileInfo(path[base_path_len:], is_dir, size))
+ if hdfs.exists(hdfs_base_path):
+ for file_detail in hdfs.ls(hdfs_base_path, detail=True):
+ file_name = file_detail.get("name")
+ # Strip off anything that comes before the artifact root e.g. hdfs://name
+ offset = file_name.index(self.path)
+ rel_path = _relative_path_remote(self.path, file_name[offset:])
+ is_dir = file_detail.get("kind") == "directory"
+ size = file_detail.get("size")
+ paths.append(FileInfo(rel_path, is_dir, size))
return sorted(paths, key=lambda f: paths)
def _walk_path(self, hdfs, hdfs_path):
@@ -202,6 +206,9 @@
def _download_hdfs_file(hdfs, remote_file_path, local_file_path):
+ # Ensure all required directories exist. Without doing this nested files can't be downloaded.
+ dirs = os.path.dirname(local_file_path)
+ os.makedirs(dirs)
with open(local_file_path, 'wb') as f:
f.write(hdfs.open(remote_file_path, 'rb').read())
| {"golden_diff": "diff --git a/mlflow/store/hdfs_artifact_repo.py b/mlflow/store/hdfs_artifact_repo.py\n--- a/mlflow/store/hdfs_artifact_repo.py\n+++ b/mlflow/store/hdfs_artifact_repo.py\n@@ -74,17 +74,21 @@\n \n :param path: Relative source path. Possible subdirectory existing under\n hdfs:/some/path/run_id/artifacts\n- :return: List of files and directories under given path -\n- example:\n- ['conda.yaml', 'MLmodel', 'model.pkl']\n+ :return: List of FileInfos under given path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, path)\n- base_path_len = len(hdfs_base_path) + 1\n \n with hdfs_system(host=self.host, port=self.port) as hdfs:\n paths = []\n- for path, is_dir, size in self._walk_path(hdfs, hdfs_base_path):\n- paths.append(FileInfo(path[base_path_len:], is_dir, size))\n+ if hdfs.exists(hdfs_base_path):\n+ for file_detail in hdfs.ls(hdfs_base_path, detail=True):\n+ file_name = file_detail.get(\"name\")\n+ # Strip off anything that comes before the artifact root e.g. hdfs://name\n+ offset = file_name.index(self.path)\n+ rel_path = _relative_path_remote(self.path, file_name[offset:])\n+ is_dir = file_detail.get(\"kind\") == \"directory\"\n+ size = file_detail.get(\"size\")\n+ paths.append(FileInfo(rel_path, is_dir, size))\n return sorted(paths, key=lambda f: paths)\n \n def _walk_path(self, hdfs, hdfs_path):\n@@ -202,6 +206,9 @@\n \n \n def _download_hdfs_file(hdfs, remote_file_path, local_file_path):\n+ # Ensure all required directories exist. Without doing this nested files can't be downloaded.\n+ dirs = os.path.dirname(local_file_path)\n+ os.makedirs(dirs)\n with open(local_file_path, 'wb') as f:\n f.write(hdfs.open(remote_file_path, 'rb').read())\n", "issue": "[BUG] HdfsArtifactRepository list_artifacts recursively lists all items under directory\nThank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\r\nfor information on what types of issues we address.\r\n\r\nFor help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).\r\n\r\n \r\nPlease do not delete this template unless you are sure your issue is outside its scope.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Centos 7\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 1.1.0\r\n- **Python version**: 3.6.8\r\n- **npm version, if running the dev UI**: N/A\r\n- **Exact command to reproduce**: mlflow artifacts list -r <run id for artifacts stored on hdfs>\r\n\r\n### Describe the problem\r\nlist_artifacts of an artifact repository is expected to only list the files directly under the provided path (see https://github.com/mlflow/mlflow/blob/4b1868719837d1844f19b6242643222549ee2794/mlflow/store/cli.py#L74 ). HdfsArtifactRepository walks all files under the given path and returns them (see https://github.com/mlflow/mlflow/blob/4b1868719837d1844f19b6242643222549ee2794/mlflow/store/hdfs_artifact_repo.py#L89 ).\r\n\r\nThis behavior breaks the mflow server as it expects the behavior specified in the cli file.\r\n\r\n### Code to reproduce issue\r\nProvide a reproducible test case that is the bare minimum necessary to generate the problem.\r\n\r\n### Other info / logs\r\nInclude any logs or source code that would be helpful to diagnose the problem. 
If including tracebacks,\r\nplease include the full traceback. Large logs and files should be attached.\r\n\n", "before_files": [{"content": "import os\nimport posixpath\nimport tempfile\nfrom contextlib import contextmanager\n\nfrom six.moves import urllib\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path\n\n\nclass HdfsArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on HDFS.\n\n This repository is used with URIs of the form ``hdfs:/<path>``. The repository can only be used\n together with the RestStore.\n \"\"\"\n\n def __init__(self, artifact_uri):\n self.host, self.port, self.path = _resolve_connection_params(artifact_uri)\n super(HdfsArtifactRepository, self).__init__(artifact_uri)\n\n def log_artifact(self, local_file, artifact_path=None):\n \"\"\"\n Log artifact in hdfs.\n :param local_file: source file path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n _, file_name = os.path.split(local_file)\n destination = posixpath.join(hdfs_base_path, file_name)\n with hdfs.open(destination, 'wb') as output:\n output.write(open(local_file, \"rb\").read())\n\n def log_artifacts(self, local_dir, artifact_path=None):\n \"\"\"\n Log artifacts in hdfs.\n Missing remote sub-directories will be created if needed.\n :param local_dir: source dir path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n\n if not hdfs.exists(hdfs_base_path):\n hdfs.mkdir(hdfs_base_path)\n\n for subdir_path, _, files in os.walk(local_dir):\n\n relative_path = _relative_path_local(local_dir, subdir_path)\n\n hdfs_subdir_path = posixpath.join(hdfs_base_path, relative_path) \\\n if relative_path else hdfs_base_path\n\n if not hdfs.exists(hdfs_subdir_path):\n hdfs.mkdir(hdfs_subdir_path)\n\n for each_file in files:\n source = os.path.join(subdir_path, each_file)\n destination = posixpath.join(hdfs_subdir_path, each_file)\n with hdfs.open(destination, 'wb') as output_stream:\n output_stream.write(open(source, \"rb\").read())\n\n def list_artifacts(self, path=None):\n \"\"\"\n Lists files and directories under artifacts directory for the current run_id.\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param path: Relative source path. 
Possible subdirectory existing under\n hdfs:/some/path/run_id/artifacts\n :return: List of files and directories under given path -\n example:\n ['conda.yaml', 'MLmodel', 'model.pkl']\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, path)\n base_path_len = len(hdfs_base_path) + 1\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n paths = []\n for path, is_dir, size in self._walk_path(hdfs, hdfs_base_path):\n paths.append(FileInfo(path[base_path_len:], is_dir, size))\n return sorted(paths, key=lambda f: paths)\n\n def _walk_path(self, hdfs, hdfs_path):\n if hdfs.exists(hdfs_path):\n if hdfs.isdir(hdfs_path):\n for subdir, _, files in hdfs.walk(hdfs_path):\n if subdir != hdfs_path:\n yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get(\"size\")\n for f in files:\n file_path = posixpath.join(subdir, f)\n yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get(\"size\")\n else:\n yield hdfs_path, False, hdfs.info(hdfs_path).get(\"size\")\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Download an artifact file or directory to a local directory/file if applicable, and\n return a local path for it.\n The caller is responsible for managing the lifecycle of the downloaded artifacts.\n\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param artifact_path: Relative source path to the desired artifacts file or directory.\n :param dst_path: Absolute path of the local filesystem destination directory to which\n to download the specified artifacts. This directory must already\n exist. If unspecified, the artifacts will be downloaded to a new,\n uniquely-named\n directory on the local filesystem.\n\n :return: Absolute path of the local filesystem location containing the downloaded\n artifacts - file/directory.\n \"\"\"\n\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n local_dir = _tmp_dir(dst_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n\n if not hdfs.isdir(hdfs_base_path):\n local_path = os.path.join(local_dir, os.path.normpath(artifact_path))\n _download_hdfs_file(hdfs, hdfs_base_path, local_path)\n return local_path\n\n for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path):\n\n relative_path = _relative_path_remote(hdfs_base_path, path)\n local_path = os.path.join(local_dir, relative_path) \\\n if relative_path else local_dir\n\n if is_dir:\n mkdir(local_path)\n else:\n _download_hdfs_file(hdfs, path, local_path)\n return local_dir\n\n def _download_file(self, remote_file_path, local_path):\n raise MlflowException('This is not implemented. 
Should never be called.')\n\n\n@contextmanager\ndef hdfs_system(host, port):\n \"\"\"\n hdfs system context - Attempt to establish the connection to hdfs\n and yields HadoopFileSystem\n\n :param host: hostname or when relaying on the core-site.xml config use 'default'\n :param port: port or when relaying on the core-site.xml config use 0\n \"\"\"\n import pyarrow as pa\n\n driver = os.getenv('MLFLOW_HDFS_DRIVER') or 'libhdfs'\n kerb_ticket = os.getenv('MLFLOW_KERBEROS_TICKET_CACHE')\n kerberos_user = os.getenv('MLFLOW_KERBEROS_USER')\n extra_conf = _parse_extra_conf(os.getenv('MLFLOW_PYARROW_EXTRA_CONF'))\n\n connected = pa.hdfs.connect(host=host or 'default',\n port=port or 0,\n user=kerberos_user,\n driver=driver,\n kerb_ticket=kerb_ticket,\n extra_conf=extra_conf)\n yield connected\n connected.close()\n\n\ndef _resolve_connection_params(artifact_uri):\n parsed = urllib.parse.urlparse(artifact_uri)\n return parsed.hostname, parsed.port, parsed.path\n\n\ndef _resolve_base_path(path, artifact_path):\n if path == artifact_path:\n return path\n if artifact_path:\n return posixpath.join(path, artifact_path)\n return path\n\n\ndef _relative_path(base_dir, subdir_path, path_module):\n relative_path = path_module.relpath(subdir_path, base_dir)\n return relative_path if relative_path is not '.' else None\n\n\ndef _relative_path_local(base_dir, subdir_path):\n rel_path = _relative_path(base_dir, subdir_path, os.path)\n return relative_path_to_artifact_path(rel_path) if rel_path is not None else None\n\n\ndef _relative_path_remote(base_dir, subdir_path):\n return _relative_path(base_dir, subdir_path, posixpath)\n\n\ndef _tmp_dir(local_path):\n return os.path.abspath(tempfile.mkdtemp(dir=local_path))\n\n\ndef _download_hdfs_file(hdfs, remote_file_path, local_file_path):\n with open(local_file_path, 'wb') as f:\n f.write(hdfs.open(remote_file_path, 'rb').read())\n\n\ndef _parse_extra_conf(extra_conf):\n if extra_conf:\n def as_pair(config):\n key, val = config.split('=')\n return key, val\n\n list_of_key_val = [as_pair(conf) for conf in extra_conf.split(',')]\n return dict(list_of_key_val)\n return None\n", "path": "mlflow/store/hdfs_artifact_repo.py"}], "after_files": [{"content": "import os\nimport posixpath\nimport tempfile\nfrom contextlib import contextmanager\n\nfrom six.moves import urllib\n\nfrom mlflow.entities import FileInfo\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import mkdir, relative_path_to_artifact_path\n\n\nclass HdfsArtifactRepository(ArtifactRepository):\n \"\"\"\n Stores artifacts on HDFS.\n\n This repository is used with URIs of the form ``hdfs:/<path>``. 
The repository can only be used\n together with the RestStore.\n \"\"\"\n\n def __init__(self, artifact_uri):\n self.host, self.port, self.path = _resolve_connection_params(artifact_uri)\n super(HdfsArtifactRepository, self).__init__(artifact_uri)\n\n def log_artifact(self, local_file, artifact_path=None):\n \"\"\"\n Log artifact in hdfs.\n :param local_file: source file path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n _, file_name = os.path.split(local_file)\n destination = posixpath.join(hdfs_base_path, file_name)\n with hdfs.open(destination, 'wb') as output:\n output.write(open(local_file, \"rb\").read())\n\n def log_artifacts(self, local_dir, artifact_path=None):\n \"\"\"\n Log artifacts in hdfs.\n Missing remote sub-directories will be created if needed.\n :param local_dir: source dir path\n :param artifact_path: when specified will attempt to write under artifact_uri/artifact_path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n\n if not hdfs.exists(hdfs_base_path):\n hdfs.mkdir(hdfs_base_path)\n\n for subdir_path, _, files in os.walk(local_dir):\n\n relative_path = _relative_path_local(local_dir, subdir_path)\n\n hdfs_subdir_path = posixpath.join(hdfs_base_path, relative_path) \\\n if relative_path else hdfs_base_path\n\n if not hdfs.exists(hdfs_subdir_path):\n hdfs.mkdir(hdfs_subdir_path)\n\n for each_file in files:\n source = os.path.join(subdir_path, each_file)\n destination = posixpath.join(hdfs_subdir_path, each_file)\n with hdfs.open(destination, 'wb') as output_stream:\n output_stream.write(open(source, \"rb\").read())\n\n def list_artifacts(self, path=None):\n \"\"\"\n Lists files and directories under artifacts directory for the current run_id.\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param path: Relative source path. Possible subdirectory existing under\n hdfs:/some/path/run_id/artifacts\n :return: List of FileInfos under given path\n \"\"\"\n hdfs_base_path = _resolve_base_path(self.path, path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n paths = []\n if hdfs.exists(hdfs_base_path):\n for file_detail in hdfs.ls(hdfs_base_path, detail=True):\n file_name = file_detail.get(\"name\")\n # Strip off anything that comes before the artifact root e.g. 
hdfs://name\n offset = file_name.index(self.path)\n rel_path = _relative_path_remote(self.path, file_name[offset:])\n is_dir = file_detail.get(\"kind\") == \"directory\"\n size = file_detail.get(\"size\")\n paths.append(FileInfo(rel_path, is_dir, size))\n return sorted(paths, key=lambda f: paths)\n\n def _walk_path(self, hdfs, hdfs_path):\n if hdfs.exists(hdfs_path):\n if hdfs.isdir(hdfs_path):\n for subdir, _, files in hdfs.walk(hdfs_path):\n if subdir != hdfs_path:\n yield subdir, hdfs.isdir(subdir), hdfs.info(subdir).get(\"size\")\n for f in files:\n file_path = posixpath.join(subdir, f)\n yield file_path, hdfs.isdir(file_path), hdfs.info(file_path).get(\"size\")\n else:\n yield hdfs_path, False, hdfs.info(hdfs_path).get(\"size\")\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Download an artifact file or directory to a local directory/file if applicable, and\n return a local path for it.\n The caller is responsible for managing the lifecycle of the downloaded artifacts.\n\n (self.path contains the base path - hdfs:/some/path/run_id/artifacts)\n\n :param artifact_path: Relative source path to the desired artifacts file or directory.\n :param dst_path: Absolute path of the local filesystem destination directory to which\n to download the specified artifacts. This directory must already\n exist. If unspecified, the artifacts will be downloaded to a new,\n uniquely-named\n directory on the local filesystem.\n\n :return: Absolute path of the local filesystem location containing the downloaded\n artifacts - file/directory.\n \"\"\"\n\n hdfs_base_path = _resolve_base_path(self.path, artifact_path)\n local_dir = _tmp_dir(dst_path)\n\n with hdfs_system(host=self.host, port=self.port) as hdfs:\n\n if not hdfs.isdir(hdfs_base_path):\n local_path = os.path.join(local_dir, os.path.normpath(artifact_path))\n _download_hdfs_file(hdfs, hdfs_base_path, local_path)\n return local_path\n\n for path, is_dir, _ in self._walk_path(hdfs, hdfs_base_path):\n\n relative_path = _relative_path_remote(hdfs_base_path, path)\n local_path = os.path.join(local_dir, relative_path) \\\n if relative_path else local_dir\n\n if is_dir:\n mkdir(local_path)\n else:\n _download_hdfs_file(hdfs, path, local_path)\n return local_dir\n\n def _download_file(self, remote_file_path, local_path):\n raise MlflowException('This is not implemented. 
Should never be called.')\n\n\n@contextmanager\ndef hdfs_system(host, port):\n \"\"\"\n hdfs system context - Attempt to establish the connection to hdfs\n and yields HadoopFileSystem\n\n :param host: hostname or when relaying on the core-site.xml config use 'default'\n :param port: port or when relaying on the core-site.xml config use 0\n \"\"\"\n import pyarrow as pa\n\n driver = os.getenv('MLFLOW_HDFS_DRIVER') or 'libhdfs'\n kerb_ticket = os.getenv('MLFLOW_KERBEROS_TICKET_CACHE')\n kerberos_user = os.getenv('MLFLOW_KERBEROS_USER')\n extra_conf = _parse_extra_conf(os.getenv('MLFLOW_PYARROW_EXTRA_CONF'))\n\n connected = pa.hdfs.connect(host=host or 'default',\n port=port or 0,\n user=kerberos_user,\n driver=driver,\n kerb_ticket=kerb_ticket,\n extra_conf=extra_conf)\n yield connected\n connected.close()\n\n\ndef _resolve_connection_params(artifact_uri):\n parsed = urllib.parse.urlparse(artifact_uri)\n return parsed.hostname, parsed.port, parsed.path\n\n\ndef _resolve_base_path(path, artifact_path):\n if path == artifact_path:\n return path\n if artifact_path:\n return posixpath.join(path, artifact_path)\n return path\n\n\ndef _relative_path(base_dir, subdir_path, path_module):\n relative_path = path_module.relpath(subdir_path, base_dir)\n return relative_path if relative_path is not '.' else None\n\n\ndef _relative_path_local(base_dir, subdir_path):\n rel_path = _relative_path(base_dir, subdir_path, os.path)\n return relative_path_to_artifact_path(rel_path) if rel_path is not None else None\n\n\ndef _relative_path_remote(base_dir, subdir_path):\n return _relative_path(base_dir, subdir_path, posixpath)\n\n\ndef _tmp_dir(local_path):\n return os.path.abspath(tempfile.mkdtemp(dir=local_path))\n\n\ndef _download_hdfs_file(hdfs, remote_file_path, local_file_path):\n # Ensure all required directories exist. Without doing this nested files can't be downloaded.\n dirs = os.path.dirname(local_file_path)\n os.makedirs(dirs)\n with open(local_file_path, 'wb') as f:\n f.write(hdfs.open(remote_file_path, 'rb').read())\n\n\ndef _parse_extra_conf(extra_conf):\n if extra_conf:\n def as_pair(config):\n key, val = config.split('=')\n return key, val\n\n list_of_key_val = [as_pair(conf) for conf in extra_conf.split(',')]\n return dict(list_of_key_val)\n return None\n", "path": "mlflow/store/hdfs_artifact_repo.py"}]} | 3,160 | 474 |
gh_patches_debug_13127 | rasdani/github-patches | git_diff | GoogleCloudPlatform__PerfKitBenchmarker-680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
silo benchmark fails behind proxy
From @mateusz-blaszkowski in #475:
> silo - failed with Clone of 'git://github.com/kohler/masstree-beta.git' into submodule path 'masstree' failed. I run the test behind the proxy and this is the case. I would have changed the path to Git repository to https:// but it is hidden somewhere in 'dbtest' (look a the command which failed: cd /tmp/pkb/silo && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j80 dbtest). Oh, i found that the exact path is specified here: https://github.com/stephentu/silo/blob/cc11ca1ea949ef266ee12a9b1c310392519d9e3b/.gitmodules
We should switch it to `https://`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `perfkitbenchmarker/linux_packages/silo.py`
Content:
```
1 # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """Module containing Silo installation and cleanup functions."""
17
18 from perfkitbenchmarker import vm_util
19
20 GIT_REPO = 'https://github.com/stephentu/silo.git'
21 GIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'
22 SILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR
23 APT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '
24 'libmysqld-dev libaio-dev libssl-dev')
25 YUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '
26 'libaio-devel openssl-devel')
27
28
29 def _Install(vm):
30 """Installs the Silo package on the VM."""
31 nthreads = vm.num_cpus * 2
32 vm.Install('build_tools')
33 vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))
34 vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,
35 GIT_TAG))
36 vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\
37 -j{1} dbtest'.format(SILO_DIR, nthreads))
38
39
40 def YumInstall(vm):
41 """Installs the Silo package on the VM."""
42 vm.InstallPackages(YUM_PACKAGES)
43 _Install(vm)
44
45
46 def AptInstall(vm):
47 """Installs the Silo package on the VM."""
48 vm.InstallPackages(APT_PACKAGES)
49 _Install(vm)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/perfkitbenchmarker/linux_packages/silo.py b/perfkitbenchmarker/linux_packages/silo.py
--- a/perfkitbenchmarker/linux_packages/silo.py
+++ b/perfkitbenchmarker/linux_packages/silo.py
@@ -33,6 +33,9 @@
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,
GIT_TAG))
+ # This is due to a failing clone command when executing behind a proxy.
+ # Replacing the protocol to https instead of git fixes the issue.
+ vm.RemoteCommand('git config --global url."https://".insteadOf git://')
vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\
-j{1} dbtest'.format(SILO_DIR, nthreads))
| {"golden_diff": "diff --git a/perfkitbenchmarker/linux_packages/silo.py b/perfkitbenchmarker/linux_packages/silo.py\n--- a/perfkitbenchmarker/linux_packages/silo.py\n+++ b/perfkitbenchmarker/linux_packages/silo.py\n@@ -33,6 +33,9 @@\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n+ # This is due to a failing clone command when executing behind a proxy.\n+ # Replacing the protocol to https instead of git fixes the issue.\n+ vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n", "issue": "silo benchmark fails behind proxy\nFrom @mateusz-blaszkowski in #475: \n\n> silo - failed with Clone of 'git://github.com/kohler/masstree-beta.git' into submodule path 'masstree' failed. I run the test behind the proxy and this is the case. I would have changed the path to Git repository to https:// but it is hidden somewhere in 'dbtest' (look a the command which failed: cd /tmp/pkb/silo && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make -j80 dbtest). Oh, i found that the exact path is specified here: https://github.com/stephentu/silo/blob/cc11ca1ea949ef266ee12a9b1c310392519d9e3b/.gitmodules\n\nWe should switch it to `https://`.\n\n", "before_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module containing Silo installation and cleanup functions.\"\"\"\n\nfrom perfkitbenchmarker import vm_util\n\nGIT_REPO = 'https://github.com/stephentu/silo.git'\nGIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'\nSILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR\nAPT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '\n 'libmysqld-dev libaio-dev libssl-dev')\nYUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '\n 'libaio-devel openssl-devel')\n\n\ndef _Install(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n nthreads = vm.num_cpus * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n\n\ndef YumInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(YUM_PACKAGES)\n _Install(vm)\n\n\ndef AptInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)\n", "path": "perfkitbenchmarker/linux_packages/silo.py"}], "after_files": [{"content": "# Copyright 2014 PerfKitBenchmarker Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Module containing Silo installation and cleanup functions.\"\"\"\n\nfrom perfkitbenchmarker import vm_util\n\nGIT_REPO = 'https://github.com/stephentu/silo.git'\nGIT_TAG = '62d2d498984bf69d3b46a74e310e1fd12fd1f692'\nSILO_DIR = '%s/silo' % vm_util.VM_TMP_DIR\nAPT_PACKAGES = ('libjemalloc-dev libnuma-dev libdb++-dev '\n 'libmysqld-dev libaio-dev libssl-dev')\nYUM_PACKAGES = ('jemalloc-devel numactl-devel libdb-cxx-devel mysql-devel '\n 'libaio-devel openssl-devel')\n\n\ndef _Install(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n nthreads = vm.num_cpus * 2\n vm.Install('build_tools')\n vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, SILO_DIR))\n vm.RemoteCommand('cd {0} && git checkout {1}'.format(SILO_DIR,\n GIT_TAG))\n # This is due to a failing clone command when executing behind a proxy.\n # Replacing the protocol to https instead of git fixes the issue.\n vm.RemoteCommand('git config --global url.\"https://\".insteadOf git://')\n vm.RemoteCommand('cd {0} && MODE=perf DEBUG=0 CHECK_INVARIANTS=0 make\\\n -j{1} dbtest'.format(SILO_DIR, nthreads))\n\n\ndef YumInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(YUM_PACKAGES)\n _Install(vm)\n\n\ndef AptInstall(vm):\n \"\"\"Installs the Silo package on the VM.\"\"\"\n vm.InstallPackages(APT_PACKAGES)\n _Install(vm)\n", "path": "perfkitbenchmarker/linux_packages/silo.py"}]} | 1,051 | 206 |
gh_patches_debug_9322 | rasdani/github-patches | git_diff | cobbler__cobbler-3507 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cobbler import rsync fails from mounted ISO media with SELinux enabled with return code 23
### Describe the bug
When you mount an ISO image - the permissions of the mounted files are read only:
```
# ls -la /mnt
total 38
dr-xr-xr-x. 1 root root 2048 Oct 29 22:06 ./
dr-xr-xr-x. 25 root root 4096 Oct 26 08:44 ../
dr-xr-xr-x. 1 root root 2048 Oct 29 22:06 AppStream/
dr-xr-xr-x. 1 root root 2048 Oct 29 22:06 BaseOS/
-r--r--r--. 1 root root 45 Oct 29 22:06 .discinfo
dr-xr-xr-x. 1 root root 2048 Oct 29 21:53 EFI/
-r--r--r--. 1 root root 299 Oct 29 22:06 EULA
-r--r--r--. 1 root root 745 Oct 29 22:06 extra_files.json
dr-xr-xr-x. 1 root root 2048 Oct 29 21:53 images/
dr-xr-xr-x. 1 root root 2048 Oct 29 21:53 isolinux/
-r--r--r--. 1 root root 18092 Oct 29 22:06 LICENSE
-r--r--r--. 1 root root 88 Oct 29 22:06 media.repo
-r--r--r--. 1 root root 1530 Oct 29 22:06 .treeinfo
```
When you run `cobbler import --path=/mnt` the rsync will fail:
```
running: ['rsync', '--archive', '--progress', '/mnt/', '/var/www/cobbler/distro_mirror/centos-stream-9']
received on stdout: sending incremental file list
Exception occurred: <class 'RuntimeError'>
Exception value: rsync import failed with return code 23!
Exception Info:
!!! TASK FAILED !!!
```
### Steps to reproduce
1. mount -o loop /path/to/ISO /mnt
2. cobbler import --path=/mnt
### Expected behavior
Distro is imported.
### Cobbler version
<!--- Paste output from `cobbler version` -->
````paste below
Cobbler 3.4.0
source: ?, ?
build time: Sat Nov 4 21:15:48 2023
````
### Operating system
CentOS Stream 8
### Cobbler log
<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->
````paste below
2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | importing from a network location, running rsync to fetch the files first
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | running: ['rsync', '--archive', '--progress', '/tmp/Fedora-Server-x86_64-38-1.6.iso/', '/var/www/cobbler/distro_mirror/fedora-38']
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | received on stdout: sending incremental file list
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception occurred: <class 'RuntimeError'>
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception value: rsync import failed with return code 23!
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception Info:
File "/usr/lib/python3.6/site-packages/cobbler/utils/thread.py", line 103, in run
return_code = self._run(self)
File "/usr/lib/python3.6/site-packages/cobbler/remote.py", line 398, in runner
self.options.get("os_version", None),
File "/usr/lib/python3.6/site-packages/cobbler/api.py", line 2327, in import_tree
os_version,
File "/usr/lib/python3.6/site-packages/cobbler/actions/importer.py", line 127, in run
f"rsync import failed with return code {rsync_return_code}!"
[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - ERROR | ### TASK FAILED ###
````
### Additional information
The problem is that the read-only permissions are copied to the destination, and then cobbler does not have permission to write to the destination without the `dac_override` permission which is not granted:
```
type=AVC msg=audit(1699229796.164:5238): avc: denied { dac_override } for pid=142026 comm="rsync" capability=1 scontext=system_u:system_r:cobblerd_t:s0 tcontext=system_u:system_r:cobblerd_t:s0 tclass=capability permissive=0
```
I will be submitting a pull request the changes the rsync options to grant write permissions on the destination, which is what we would want anyway.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/actions/importer.py`
Content:
```
1 """
2 This module contains the logic that kicks of the ``cobbler import`` process. This is extracted logic from ``api.py``
3 that is essentially calling ``modules/mangers/import_signatures.py`` with some preparatory code.
4 """
5 import logging
6 import os
7 from typing import TYPE_CHECKING, Optional
8
9 from cobbler import utils
10 from cobbler.utils import filesystem_helpers
11
12 if TYPE_CHECKING:
13 from cobbler.api import CobblerAPI
14
15
16 class Importer:
17 """
18 Wrapper class to adhere to the style of all other actions.
19 """
20
21 def __init__(self, api: "CobblerAPI") -> None:
22 """
23 Constructor to initialize the class.
24
25 :param api: The CobblerAPI.
26 """
27 self.api = api
28 self.logger = logging.getLogger()
29
30 def run(
31 self,
32 mirror_url: str,
33 mirror_name: str,
34 network_root: Optional[str] = None,
35 autoinstall_file: Optional[str] = None,
36 rsync_flags: Optional[str] = None,
37 arch: Optional[str] = None,
38 breed: Optional[str] = None,
39 os_version: Optional[str] = None,
40 ) -> bool:
41 """
42 Automatically import a directory tree full of distribution files.
43
44 :param mirror_url: Can be a string that represents a path, a user@host syntax for SSH, or an rsync:// address.
45 If mirror_url is a filesystem path and mirroring is not desired, set network_root to
46 something like "nfs://path/to/mirror_url/root"
47 :param mirror_name: The name of the mirror.
48 :param network_root: the remote path (nfs/http/ftp) for the distro files
49 :param autoinstall_file: user-specified response file, which will override the default
50 :param rsync_flags: Additional flags that will be passed to the rsync call that will sync everything to the
51 Cobbler webroot.
52 :param arch: user-specified architecture
53 :param breed: user-specified breed
54 :param os_version: user-specified OS version
55 """
56 self.api.log(
57 "import_tree",
58 [mirror_url, mirror_name, network_root, autoinstall_file, rsync_flags],
59 )
60
61 # Both --path and --name are required arguments.
62 if mirror_url is None or not mirror_url:
63 self.logger.info("import failed. no --path specified")
64 return False
65 if not mirror_name:
66 self.logger.info("import failed. no --name specified")
67 return False
68
69 path = os.path.normpath(
70 f"{self.api.settings().webdir}/distro_mirror/{mirror_name}"
71 )
72 if arch is not None:
73 arch = arch.lower()
74 if arch == "x86":
75 # be consistent
76 arch = "i386"
77 if path.split("-")[-1] != arch:
78 path += f"-{arch}"
79
80 # We need to mirror (copy) the files.
81 self.logger.info(
82 "importing from a network location, running rsync to fetch the files first"
83 )
84
85 filesystem_helpers.mkdir(path)
86
87 # Prevent rsync from creating the directory name twice if we are copying via rsync.
88
89 if not mirror_url.endswith("/"):
90 mirror_url = f"{mirror_url}/"
91
92 if (
93 mirror_url.startswith("http://")
94 or mirror_url.startswith("https://")
95 or mirror_url.startswith("ftp://")
96 or mirror_url.startswith("nfs://")
97 ):
98 # HTTP mirrors are kind of primitive. rsync is better. That's why this isn't documented in the manpage and
99 # we don't support them.
100 # TODO: how about adding recursive FTP as an option?
101 self.logger.info("unsupported protocol")
102 return False
103
104 # Good, we're going to use rsync.. We don't use SSH for public mirrors and local files.
105 # Presence of user@host syntax means use SSH
106 spacer = ""
107 if not mirror_url.startswith("rsync://") and not mirror_url.startswith("/"):
108 spacer = ' -e "ssh" '
109 rsync_cmd = ["rsync", "--archive"]
110 if spacer != "":
111 rsync_cmd.append(spacer)
112 rsync_cmd.append("--progress")
113 if rsync_flags:
114 rsync_cmd.append(rsync_flags)
115
116 # If --available-as was specified, limit the files we pull down via rsync to just those that are critical
117 # to detecting what the distro is
118 if network_root is not None:
119 rsync_cmd.append("--include-from=/etc/cobbler/import_rsync_whitelist")
120
121 rsync_cmd += [mirror_url, path]
122
123 # kick off the rsync now
124 rsync_return_code = utils.subprocess_call(rsync_cmd, shell=False)
125 if rsync_return_code != 0:
126 raise RuntimeError(
127 f"rsync import failed with return code {rsync_return_code}!"
128 )
129
130 if network_root is not None:
131 # In addition to mirroring, we're going to assume the path is available over http, ftp, and nfs, perhaps on
132 # an external filer. Scanning still requires --mirror is a filesystem path, but --available-as marks the
133 # network path. This allows users to point the path at a directory containing just the network boot files
134 # while the rest of the distro files are available somewhere else.
135
136 # Find the filesystem part of the path, after the server bits, as each distro URL needs to be calculated
137 # relative to this.
138
139 if not network_root.endswith("/"):
140 network_root += "/"
141 valid_roots = ["nfs://", "ftp://", "http://", "https://"]
142 for valid_root in valid_roots:
143 if network_root.startswith(valid_root):
144 break
145 else:
146 self.logger.info(
147 "Network root given to --available-as must be nfs://, ftp://, http://, or https://"
148 )
149 return False
150
151 if network_root.startswith("nfs://"):
152 try:
153 (_, _, _) = network_root.split(":", 3)
154 except ValueError:
155 self.logger.info(
156 "Network root given to --available-as is missing a colon, please see the manpage example."
157 )
158 return False
159
160 import_module = self.api.get_module_by_name("managers.import_signatures")
161 if import_module is None:
162 raise ImportError("Could not retrieve import signatures module!")
163 import_manager = import_module.get_import_manager(self.api)
164 import_manager.run(
165 path, mirror_name, network_root, autoinstall_file, arch, breed, os_version
166 )
167 return True
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/actions/importer.py b/cobbler/actions/importer.py
--- a/cobbler/actions/importer.py
+++ b/cobbler/actions/importer.py
@@ -106,7 +106,8 @@
spacer = ""
if not mirror_url.startswith("rsync://") and not mirror_url.startswith("/"):
spacer = ' -e "ssh" '
- rsync_cmd = ["rsync", "--archive"]
+ # --archive but without -p to avoid copying read-only ISO permissions and making sure we have write access
+ rsync_cmd = ["rsync", "-rltgoD", "--chmod=ug=rwX"]
if spacer != "":
rsync_cmd.append(spacer)
rsync_cmd.append("--progress")
| {"golden_diff": "diff --git a/cobbler/actions/importer.py b/cobbler/actions/importer.py\n--- a/cobbler/actions/importer.py\n+++ b/cobbler/actions/importer.py\n@@ -106,7 +106,8 @@\n spacer = \"\"\n if not mirror_url.startswith(\"rsync://\") and not mirror_url.startswith(\"/\"):\n spacer = ' -e \"ssh\" '\n- rsync_cmd = [\"rsync\", \"--archive\"]\n+ # --archive but without -p to avoid copying read-only ISO permissions and making sure we have write access\n+ rsync_cmd = [\"rsync\", \"-rltgoD\", \"--chmod=ug=rwX\"]\n if spacer != \"\":\n rsync_cmd.append(spacer)\n rsync_cmd.append(\"--progress\")\n", "issue": "cobbler import rsync fails from mounted ISO media with SELinux enabled with return code 23\n### Describe the bug\r\n\r\nWhen you mount an ISO image - the permissions of the mounted files are read only:\r\n```\r\n# ls -la /mnt\r\ntotal 38\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 22:06 ./\r\ndr-xr-xr-x. 25 root root 4096 Oct 26 08:44 ../\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 22:06 AppStream/\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 22:06 BaseOS/\r\n-r--r--r--. 1 root root 45 Oct 29 22:06 .discinfo\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 21:53 EFI/\r\n-r--r--r--. 1 root root 299 Oct 29 22:06 EULA\r\n-r--r--r--. 1 root root 745 Oct 29 22:06 extra_files.json\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 21:53 images/\r\ndr-xr-xr-x. 1 root root 2048 Oct 29 21:53 isolinux/\r\n-r--r--r--. 1 root root 18092 Oct 29 22:06 LICENSE\r\n-r--r--r--. 1 root root 88 Oct 29 22:06 media.repo\r\n-r--r--r--. 1 root root 1530 Oct 29 22:06 .treeinfo\r\n```\r\nWhen you run `cobbler import --path=/mnt` the rsync will fail:\r\n```\r\nrunning: ['rsync', '--archive', '--progress', '/mnt/', '/var/www/cobbler/distro_mirror/centos-stream-9']\r\nreceived on stdout: sending incremental file list\r\nException occurred: <class 'RuntimeError'>\r\nException value: rsync import failed with return code 23!\r\nException Info:\r\n!!! TASK FAILED !!!\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. mount -o loop /path/to/ISO /mnt\r\n2. 
cobbler import --path=/mnt\r\n\r\n### Expected behavior\r\n\r\nDistro is imported.\r\n\r\n### Cobbler version\r\n\r\n<!--- Paste output from `cobbler version` -->\r\n````paste below\r\nCobbler 3.4.0\r\n source: ?, ?\r\n build time: Sat Nov 4 21:15:48 2023\r\n````\r\n\r\n### Operating system\r\n\r\nCentOS Stream 8\r\n\r\n### Cobbler log\r\n\r\n<!--- Paste (partial) output from `/var/log/cobbler/cobbler.log` -->\r\n````paste below\r\n2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | importing from a network location, running rsync to fetch the files first\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | running: ['rsync', '--archive', '--progress', '/tmp/Fedora-Server-x86_64-38-1.6.iso/', '/var/www/cobbler/distro_mirror/fedora-38']\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | received on stdout: sending incremental file list\r\n\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception occurred: <class 'RuntimeError'>\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception value: rsync import failed with return code 23!\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - INFO | Exception Info:\r\n File \"/usr/lib/python3.6/site-packages/cobbler/utils/thread.py\", line 103, in run\r\n return_code = self._run(self)\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/remote.py\", line 398, in runner\r\n self.options.get(\"os_version\", None),\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/api.py\", line 2327, in import_tree\r\n os_version,\r\n\r\n File \"/usr/lib/python3.6/site-packages/cobbler/actions/importer.py\", line 127, in run\r\n f\"rsync import failed with return code {rsync_return_code}!\"\r\n\r\n[2023-11-04_160526_Media import_8a52b82c5b54433587b15bc9e9d2e21f] 2023-11-04T16:05:26 - ERROR | ### TASK FAILED ###\r\n````\r\n\r\n### Additional information\r\n\r\nThe problem is that the read-only permissions are copied to the destination, and then cobbler does not have permission to write to the destination without the `dac_override` permission which is not granted:\r\n```\r\ntype=AVC msg=audit(1699229796.164:5238): avc: denied { dac_override } for pid=142026 comm=\"rsync\" capability=1 scontext=system_u:system_r:cobblerd_t:s0 tcontext=system_u:system_r:cobblerd_t:s0 tclass=capability permissive=0\r\n```\r\n\r\nI will be submitting a pull request the changes the rsync options to grant write permissions on the destination, which is what we would want anyway.\n", "before_files": [{"content": "\"\"\"\nThis module contains the logic that kicks of the ``cobbler import`` process. 
This is extracted logic from ``api.py``\nthat is essentially calling ``modules/mangers/import_signatures.py`` with some preparatory code.\n\"\"\"\nimport logging\nimport os\nfrom typing import TYPE_CHECKING, Optional\n\nfrom cobbler import utils\nfrom cobbler.utils import filesystem_helpers\n\nif TYPE_CHECKING:\n from cobbler.api import CobblerAPI\n\n\nclass Importer:\n \"\"\"\n Wrapper class to adhere to the style of all other actions.\n \"\"\"\n\n def __init__(self, api: \"CobblerAPI\") -> None:\n \"\"\"\n Constructor to initialize the class.\n\n :param api: The CobblerAPI.\n \"\"\"\n self.api = api\n self.logger = logging.getLogger()\n\n def run(\n self,\n mirror_url: str,\n mirror_name: str,\n network_root: Optional[str] = None,\n autoinstall_file: Optional[str] = None,\n rsync_flags: Optional[str] = None,\n arch: Optional[str] = None,\n breed: Optional[str] = None,\n os_version: Optional[str] = None,\n ) -> bool:\n \"\"\"\n Automatically import a directory tree full of distribution files.\n\n :param mirror_url: Can be a string that represents a path, a user@host syntax for SSH, or an rsync:// address.\n If mirror_url is a filesystem path and mirroring is not desired, set network_root to\n something like \"nfs://path/to/mirror_url/root\"\n :param mirror_name: The name of the mirror.\n :param network_root: the remote path (nfs/http/ftp) for the distro files\n :param autoinstall_file: user-specified response file, which will override the default\n :param rsync_flags: Additional flags that will be passed to the rsync call that will sync everything to the\n Cobbler webroot.\n :param arch: user-specified architecture\n :param breed: user-specified breed\n :param os_version: user-specified OS version\n \"\"\"\n self.api.log(\n \"import_tree\",\n [mirror_url, mirror_name, network_root, autoinstall_file, rsync_flags],\n )\n\n # Both --path and --name are required arguments.\n if mirror_url is None or not mirror_url:\n self.logger.info(\"import failed. no --path specified\")\n return False\n if not mirror_name:\n self.logger.info(\"import failed. no --name specified\")\n return False\n\n path = os.path.normpath(\n f\"{self.api.settings().webdir}/distro_mirror/{mirror_name}\"\n )\n if arch is not None:\n arch = arch.lower()\n if arch == \"x86\":\n # be consistent\n arch = \"i386\"\n if path.split(\"-\")[-1] != arch:\n path += f\"-{arch}\"\n\n # We need to mirror (copy) the files.\n self.logger.info(\n \"importing from a network location, running rsync to fetch the files first\"\n )\n\n filesystem_helpers.mkdir(path)\n\n # Prevent rsync from creating the directory name twice if we are copying via rsync.\n\n if not mirror_url.endswith(\"/\"):\n mirror_url = f\"{mirror_url}/\"\n\n if (\n mirror_url.startswith(\"http://\")\n or mirror_url.startswith(\"https://\")\n or mirror_url.startswith(\"ftp://\")\n or mirror_url.startswith(\"nfs://\")\n ):\n # HTTP mirrors are kind of primitive. rsync is better. That's why this isn't documented in the manpage and\n # we don't support them.\n # TODO: how about adding recursive FTP as an option?\n self.logger.info(\"unsupported protocol\")\n return False\n\n # Good, we're going to use rsync.. 
We don't use SSH for public mirrors and local files.\n # Presence of user@host syntax means use SSH\n spacer = \"\"\n if not mirror_url.startswith(\"rsync://\") and not mirror_url.startswith(\"/\"):\n spacer = ' -e \"ssh\" '\n rsync_cmd = [\"rsync\", \"--archive\"]\n if spacer != \"\":\n rsync_cmd.append(spacer)\n rsync_cmd.append(\"--progress\")\n if rsync_flags:\n rsync_cmd.append(rsync_flags)\n\n # If --available-as was specified, limit the files we pull down via rsync to just those that are critical\n # to detecting what the distro is\n if network_root is not None:\n rsync_cmd.append(\"--include-from=/etc/cobbler/import_rsync_whitelist\")\n\n rsync_cmd += [mirror_url, path]\n\n # kick off the rsync now\n rsync_return_code = utils.subprocess_call(rsync_cmd, shell=False)\n if rsync_return_code != 0:\n raise RuntimeError(\n f\"rsync import failed with return code {rsync_return_code}!\"\n )\n\n if network_root is not None:\n # In addition to mirroring, we're going to assume the path is available over http, ftp, and nfs, perhaps on\n # an external filer. Scanning still requires --mirror is a filesystem path, but --available-as marks the\n # network path. This allows users to point the path at a directory containing just the network boot files\n # while the rest of the distro files are available somewhere else.\n\n # Find the filesystem part of the path, after the server bits, as each distro URL needs to be calculated\n # relative to this.\n\n if not network_root.endswith(\"/\"):\n network_root += \"/\"\n valid_roots = [\"nfs://\", \"ftp://\", \"http://\", \"https://\"]\n for valid_root in valid_roots:\n if network_root.startswith(valid_root):\n break\n else:\n self.logger.info(\n \"Network root given to --available-as must be nfs://, ftp://, http://, or https://\"\n )\n return False\n\n if network_root.startswith(\"nfs://\"):\n try:\n (_, _, _) = network_root.split(\":\", 3)\n except ValueError:\n self.logger.info(\n \"Network root given to --available-as is missing a colon, please see the manpage example.\"\n )\n return False\n\n import_module = self.api.get_module_by_name(\"managers.import_signatures\")\n if import_module is None:\n raise ImportError(\"Could not retrieve import signatures module!\")\n import_manager = import_module.get_import_manager(self.api)\n import_manager.run(\n path, mirror_name, network_root, autoinstall_file, arch, breed, os_version\n )\n return True\n", "path": "cobbler/actions/importer.py"}], "after_files": [{"content": "\"\"\"\nThis module contains the logic that kicks of the ``cobbler import`` process. 
This is extracted logic from ``api.py``\nthat is essentially calling ``modules/mangers/import_signatures.py`` with some preparatory code.\n\"\"\"\nimport logging\nimport os\nfrom typing import TYPE_CHECKING, Optional\n\nfrom cobbler import utils\nfrom cobbler.utils import filesystem_helpers\n\nif TYPE_CHECKING:\n from cobbler.api import CobblerAPI\n\n\nclass Importer:\n \"\"\"\n Wrapper class to adhere to the style of all other actions.\n \"\"\"\n\n def __init__(self, api: \"CobblerAPI\") -> None:\n \"\"\"\n Constructor to initialize the class.\n\n :param api: The CobblerAPI.\n \"\"\"\n self.api = api\n self.logger = logging.getLogger()\n\n def run(\n self,\n mirror_url: str,\n mirror_name: str,\n network_root: Optional[str] = None,\n autoinstall_file: Optional[str] = None,\n rsync_flags: Optional[str] = None,\n arch: Optional[str] = None,\n breed: Optional[str] = None,\n os_version: Optional[str] = None,\n ) -> bool:\n \"\"\"\n Automatically import a directory tree full of distribution files.\n\n :param mirror_url: Can be a string that represents a path, a user@host syntax for SSH, or an rsync:// address.\n If mirror_url is a filesystem path and mirroring is not desired, set network_root to\n something like \"nfs://path/to/mirror_url/root\"\n :param mirror_name: The name of the mirror.\n :param network_root: the remote path (nfs/http/ftp) for the distro files\n :param autoinstall_file: user-specified response file, which will override the default\n :param rsync_flags: Additional flags that will be passed to the rsync call that will sync everything to the\n Cobbler webroot.\n :param arch: user-specified architecture\n :param breed: user-specified breed\n :param os_version: user-specified OS version\n \"\"\"\n self.api.log(\n \"import_tree\",\n [mirror_url, mirror_name, network_root, autoinstall_file, rsync_flags],\n )\n\n # Both --path and --name are required arguments.\n if mirror_url is None or not mirror_url:\n self.logger.info(\"import failed. no --path specified\")\n return False\n if not mirror_name:\n self.logger.info(\"import failed. no --name specified\")\n return False\n\n path = os.path.normpath(\n f\"{self.api.settings().webdir}/distro_mirror/{mirror_name}\"\n )\n if arch is not None:\n arch = arch.lower()\n if arch == \"x86\":\n # be consistent\n arch = \"i386\"\n if path.split(\"-\")[-1] != arch:\n path += f\"-{arch}\"\n\n # We need to mirror (copy) the files.\n self.logger.info(\n \"importing from a network location, running rsync to fetch the files first\"\n )\n\n filesystem_helpers.mkdir(path)\n\n # Prevent rsync from creating the directory name twice if we are copying via rsync.\n\n if not mirror_url.endswith(\"/\"):\n mirror_url = f\"{mirror_url}/\"\n\n if (\n mirror_url.startswith(\"http://\")\n or mirror_url.startswith(\"https://\")\n or mirror_url.startswith(\"ftp://\")\n or mirror_url.startswith(\"nfs://\")\n ):\n # HTTP mirrors are kind of primitive. rsync is better. That's why this isn't documented in the manpage and\n # we don't support them.\n # TODO: how about adding recursive FTP as an option?\n self.logger.info(\"unsupported protocol\")\n return False\n\n # Good, we're going to use rsync.. 
We don't use SSH for public mirrors and local files.\n # Presence of user@host syntax means use SSH\n spacer = \"\"\n if not mirror_url.startswith(\"rsync://\") and not mirror_url.startswith(\"/\"):\n spacer = ' -e \"ssh\" '\n # --archive but without -p to avoid copying read-only ISO permissions and making sure we have write access\n rsync_cmd = [\"rsync\", \"-rltgoD\", \"--chmod=ug=rwX\"]\n if spacer != \"\":\n rsync_cmd.append(spacer)\n rsync_cmd.append(\"--progress\")\n if rsync_flags:\n rsync_cmd.append(rsync_flags)\n\n # If --available-as was specified, limit the files we pull down via rsync to just those that are critical\n # to detecting what the distro is\n if network_root is not None:\n rsync_cmd.append(\"--include-from=/etc/cobbler/import_rsync_whitelist\")\n\n rsync_cmd += [mirror_url, path]\n\n # kick off the rsync now\n rsync_return_code = utils.subprocess_call(rsync_cmd, shell=False)\n if rsync_return_code != 0:\n raise RuntimeError(\n f\"rsync import failed with return code {rsync_return_code}!\"\n )\n\n if network_root is not None:\n # In addition to mirroring, we're going to assume the path is available over http, ftp, and nfs, perhaps on\n # an external filer. Scanning still requires --mirror is a filesystem path, but --available-as marks the\n # network path. This allows users to point the path at a directory containing just the network boot files\n # while the rest of the distro files are available somewhere else.\n\n # Find the filesystem part of the path, after the server bits, as each distro URL needs to be calculated\n # relative to this.\n\n if not network_root.endswith(\"/\"):\n network_root += \"/\"\n valid_roots = [\"nfs://\", \"ftp://\", \"http://\", \"https://\"]\n for valid_root in valid_roots:\n if network_root.startswith(valid_root):\n break\n else:\n self.logger.info(\n \"Network root given to --available-as must be nfs://, ftp://, http://, or https://\"\n )\n return False\n\n if network_root.startswith(\"nfs://\"):\n try:\n (_, _, _) = network_root.split(\":\", 3)\n except ValueError:\n self.logger.info(\n \"Network root given to --available-as is missing a colon, please see the manpage example.\"\n )\n return False\n\n import_module = self.api.get_module_by_name(\"managers.import_signatures\")\n if import_module is None:\n raise ImportError(\"Could not retrieve import signatures module!\")\n import_manager = import_module.get_import_manager(self.api)\n import_manager.run(\n path, mirror_name, network_root, autoinstall_file, arch, breed, os_version\n )\n return True\n", "path": "cobbler/actions/importer.py"}]} | 3,761 | 168 |
gh_patches_debug_791 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
All training is failing with a `timm` error
**Describe the bug**
Unable to train on current master.
**To Reproduce**
Steps to reproduce the behavior:
1. Try to start any segmentation training.
2. See error:
```python-traceback
Traceback (most recent call last):
File "/software/gandlf_personal/gandlf_run", line 11, in <module>
from GANDLF.cli import main_run, copyrightMessage
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/__init__.py", line 2, in <module>
from .main_run import main_run
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/main_run.py", line 4, in <module>
from GANDLF.training_manager import TrainingManager, TrainingManager_split
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/training_manager.py", line 6, in <module>
from GANDLF.compute import training_loop
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/__init__.py", line 1, in <module>
from .training_loop import training_loop
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py", line 30, in <module>
from .generic import create_pytorch_objects
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/generic.py", line 3, in <module>
from GANDLF.models import get_model
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/__init__.py", line 32, in <module>
from .imagenet_unet import imagenet_unet_wrapper
File "/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/imagenet_unet.py", line 7, in <module>
from segmentation_models_pytorch.base import (
File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/__init__.py", line 2, in <module>
from . import encoders
File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/encoders/__init__.py", line 1, in <module>
import timm
File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/__init__.py", line 2, in <module>
from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \
File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/__init__.py", line 28, in <module>
from .maxxvit import *
File "/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/maxxvit.py", line 225, in <module>
@dataclass
^^^^^^^^^
File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1230, in dataclass
return wrap(cls)
^^^^^^^^^
File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 1220, in wrap
return _process_class(cls, init, repr, eq, order, unsafe_hash,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 958, in _process_class
cls_fields.append(_get_field(cls, name, type, kw_only))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py", line 815, in _get_field
raise ValueError(f'mutable default {type(f.default)} for field '
ValueError: mutable default <class 'timm.models.maxxvit.MaxxVitConvCfg'> for field conv_cfg is not allowed: use default_factory
```
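The traceback bottoms out inside `timm` rather than in GaNDLF itself: on Python 3.11, `dataclasses` rejects an unhashable instance (such as another dataclass) used as a field default and points at `default_factory` instead. A minimal sketch of the pattern the error message is asking for; the class names here are illustrative, not taken from `timm`:

```python
from dataclasses import dataclass, field

@dataclass
class ConvCfg:
    # stand-in for a nested config such as MaxxVitConvCfg
    kernel_size: int = 3

@dataclass
class ModelCfg:
    # conv_cfg: ConvCfg = ConvCfg()   # rejected on Python 3.11: mutable default
    conv_cfg: ConvCfg = field(default_factory=ConvCfg)  # accepted
```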
**Expected behavior**
It should work.
**Screenshots**
N.A.
**GaNDLF Version**
<!-- Put the output of the following command:
python -c 'import GANDLF as g;print(g.__version__)'
-->
0.0.18-dev
**Desktop (please complete the following information):**
N.A.
**Additional context**
N.A.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import sys, re, os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 try:
13 with open("README.md") as readme_file:
14 readme = readme_file.read()
15 except Exception as error:
16 readme = "No README information found."
17 sys.stderr.write(
18 "Warning: Could not open '%s' due %s\n" % ("README.md", error)
19 )
20
21
22 class CustomInstallCommand(install):
23 def run(self):
24 install.run(self)
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30
31
32 class CustomEggInfoCommand(egg_info):
33 def run(self):
34 egg_info.run(self)
35
36
37 try:
38 filepath = "GANDLF/version.py"
39 version_file = open(filepath)
40 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
41
42 except Exception as error:
43 __version__ = "0.0.1"
44 sys.stderr.write(
45 "Warning: Could not open '%s' due %s\n" % (filepath, error)
46 )
47
48 # Handle cases where specific files need to be bundled into the final package as installed via PyPI
49 dockerfiles = [
50 item
51 for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))
52 if (os.path.isfile(item) and item.startswith("Dockerfile-"))
53 ]
54 entrypoint_files = [
55 item
56 for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))
57 if (os.path.isfile(item) and item.startswith("gandlf_"))
58 ]
59 setup_files = ["setup.py", ".dockerignore", "pyproject.toml", "MANIFEST.in"]
60 all_extra_files = dockerfiles + entrypoint_files + setup_files
61 all_extra_files_pathcorrected = [
62 os.path.join("../", item) for item in all_extra_files
63 ]
64 # find_packages should only ever find these as subpackages of gandlf, not as top-level packages
65 # generate this dynamically?
66 # GANDLF.GANDLF is needed to prevent recursion madness in deployments
67 toplevel_package_excludes = [
68 "GANDLF.GANDLF",
69 "anonymize",
70 "cli",
71 "compute",
72 "data",
73 "grad_clipping",
74 "losses",
75 "metrics",
76 "models",
77 "optimizers",
78 "schedulers",
79 "utils",
80 ]
81
82
83 requirements = [
84 "torch==2.1.0",
85 "black==23.11.0",
86 "numpy==1.25.0",
87 "scipy",
88 "SimpleITK!=2.0.*",
89 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536
90 "torchvision",
91 "tqdm",
92 "torchio==0.19.3",
93 "pandas>=2.0.0",
94 "scikit-learn>=0.23.2",
95 "scikit-image>=0.19.1",
96 "setuptools",
97 "seaborn",
98 "pyyaml",
99 "tiffslide",
100 "matplotlib",
101 "gdown",
102 "pytest",
103 "coverage",
104 "pytest-cov",
105 "psutil",
106 "medcam",
107 "opencv-python",
108 "torchmetrics==1.1.2",
109 "zarr==2.10.3",
110 "pydicom",
111 "onnx",
112 "torchinfo==1.7.0",
113 "segmentation-models-pytorch==0.3.2",
114 "ACSConv==0.1.1",
115 "docker",
116 "dicom-anonymizer",
117 "twine",
118 "zarr",
119 "keyring",
120 ]
121
122 if __name__ == "__main__":
123 setup(
124 name="GANDLF",
125 version=__version__,
126 author="MLCommons",
127 author_email="[email protected]",
128 python_requires=">3.8, <3.12",
129 packages=find_packages(
130 where=os.path.dirname(os.path.abspath(__file__)),
131 exclude=toplevel_package_excludes,
132 ),
133 cmdclass={
134 "install": CustomInstallCommand,
135 "develop": CustomDevelopCommand,
136 "egg_info": CustomEggInfoCommand,
137 },
138 scripts=[
139 "gandlf_run",
140 "gandlf_constructCSV",
141 "gandlf_collectStats",
142 "gandlf_patchMiner",
143 "gandlf_preprocess",
144 "gandlf_anonymizer",
145 "gandlf_verifyInstall",
146 "gandlf_configGenerator",
147 "gandlf_recoverConfig",
148 "gandlf_deploy",
149 "gandlf_optimizeModel",
150 "gandlf_generateMetrics",
151 ],
152 classifiers=[
153 "Development Status :: 3 - Alpha",
154 "Intended Audience :: Science/Research",
155 "License :: OSI Approved :: Apache Software License",
156 "Natural Language :: English",
157 "Operating System :: OS Independent",
158 "Programming Language :: Python :: 3.9",
159 "Programming Language :: Python :: 3.10",
160 "Programming Language :: Python :: 3.11",
161 "Topic :: Scientific/Engineering :: Medical Science Apps.",
162 ],
163 description=(
164 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
165 ),
166 install_requires=requirements,
167 license="Apache-2.0",
168 long_description=readme,
169 long_description_content_type="text/markdown",
170 include_package_data=True,
171 package_data={"GANDLF": all_extra_files_pathcorrected},
172 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch",
173 zip_safe=False,
174 )
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -110,7 +110,7 @@
"pydicom",
"onnx",
"torchinfo==1.7.0",
- "segmentation-models-pytorch==0.3.2",
+ "segmentation-models-pytorch==0.3.3",
"ACSConv==0.1.1",
"docker",
"dicom-anonymizer",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -110,7 +110,7 @@\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n- \"segmentation-models-pytorch==0.3.2\",\n+ \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n", "issue": "All training is failing with a `timm` error\n**Describe the bug**\r\nUnable to train on current master.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Try to start any segmentation training.\r\n2. See error:\r\n```python-traceback\r\nTraceback (most recent call last):\r\n File \"/software/gandlf_personal/gandlf_run\", line 11, in <module>\r\n from GANDLF.cli import main_run, copyrightMessage\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/__init__.py\", line 2, in <module>\r\n from .main_run import main_run\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/cli/main_run.py\", line 4, in <module>\r\n from GANDLF.training_manager import TrainingManager, TrainingManager_split\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/training_manager.py\", line 6, in <module>\r\n from GANDLF.compute import training_loop\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/__init__.py\", line 1, in <module>\r\n from .training_loop import training_loop\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/training_loop.py\", line 30, in <module>\r\n from .generic import create_pytorch_objects\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/compute/generic.py\", line 3, in <module>\r\n from GANDLF.models import get_model\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/__init__.py\", line 32, in <module>\r\n from .imagenet_unet import imagenet_unet_wrapper\r\n File \"/geode2/home/u070/patis/BigRed200/projects/gandlf_mine/GANDLF/models/imagenet_unet.py\", line 7, in <module>\r\n from segmentation_models_pytorch.base import (\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/__init__.py\", line 2, in <module>\r\n from . 
import encoders\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/site-packages/segmentation_models_pytorch/encoders/__init__.py\", line 1, in <module>\r\n import timm\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/__init__.py\", line 2, in <module>\r\n from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \\\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/__init__.py\", line 28, in <module>\r\n from .maxxvit import *\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/site-packages/timm/models/maxxvit.py\", line 225, in <module>\r\n @dataclass\r\n ^^^^^^^^^\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py\", line 1230, in dataclass\r\n return wrap(cls)\r\n ^^^^^^^^^\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py\", line 1220, in wrap\r\n return _process_class(cls, init, repr, eq, order, unsafe_hash,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py\", line 958, in _process_class\r\n cls_fields.append(_get_field(cls, name, type, kw_only))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/software/gandlf_personal/venv11/lib/python3.11/dataclasses.py\", line 815, in _get_field\r\n raise ValueError(f'mutable default {type(f.default)} for field '\r\nValueError: mutable default <class 'timm.models.maxxvit.MaxxVitConvCfg'> for field conv_cfg is not allowed: use default_factory\r\n```\r\n\r\n**Expected behavior**\r\nIt should work.\r\n\r\n**Screenshots**\r\nN.A.\r\n\r\n**GaNDLF Version**\r\n<!-- Put the output of the following command:\r\npython -c 'import GANDLF as g;print(g.__version__)'\r\n-->\r\n0.0.18-dev\r\n\r\n**Desktop (please complete the following information):**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + 
setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.2\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re, os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\n \"Warning: 
Could not open '%s' due %s\\n\" % (\"README.md\", error)\n )\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\n \"Warning: Could not open '%s' due %s\\n\" % (filepath, error)\n )\n\n# Handle cases where specific files need to be bundled into the final package as installed via PyPI\ndockerfiles = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"Dockerfile-\"))\n]\nentrypoint_files = [\n item\n for item in os.listdir(os.path.dirname(os.path.abspath(__file__)))\n if (os.path.isfile(item) and item.startswith(\"gandlf_\"))\n]\nsetup_files = [\"setup.py\", \".dockerignore\", \"pyproject.toml\", \"MANIFEST.in\"]\nall_extra_files = dockerfiles + entrypoint_files + setup_files\nall_extra_files_pathcorrected = [\n os.path.join(\"../\", item) for item in all_extra_files\n]\n# find_packages should only ever find these as subpackages of gandlf, not as top-level packages\n# generate this dynamically?\n# GANDLF.GANDLF is needed to prevent recursion madness in deployments\ntoplevel_package_excludes = [\n \"GANDLF.GANDLF\",\n \"anonymize\",\n \"cli\",\n \"compute\",\n \"data\",\n \"grad_clipping\",\n \"losses\",\n \"metrics\",\n \"models\",\n \"optimizers\",\n \"schedulers\",\n \"utils\",\n]\n\n\nrequirements = [\n \"torch==2.1.0\",\n \"black==23.11.0\",\n \"numpy==1.25.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.19.3\",\n \"pandas>=2.0.0\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"gdown\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==1.1.2\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.3\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">3.8, <3.12\",\n packages=find_packages(\n where=os.path.dirname(os.path.abspath(__file__)),\n exclude=toplevel_package_excludes,\n ),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n \"gandlf_optimizeModel\",\n \"gandlf_generateMetrics\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming 
Language :: Python :: 3.11\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n package_data={\"GANDLF\": all_extra_files_pathcorrected},\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]} | 3,064 | 113 |
gh_patches_debug_27124 | rasdani/github-patches | git_diff | chainer__chainer-6807 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
F.mean_absolute_error numerically unstable with float16 arrays
In #5053, float16 support has been enabled for [F.mean_absolute_error](https://github.com/chainer/chainer/blob/master/chainer/functions/loss/mean_absolute_error.py), but it seems to produce NaN values quite easily. Usually this happens when using big batch sizes and/or if the absolute error difference is large.
The calculation is done by summing over all the absolute differences, and then dividing by the number of elements in the array. However, it appears that the summing can produce large numbers outside the possible range for `float16`. The current implementation can be broken down as follows:
```python
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
diff = self.diff.ravel()
abs_diff = abs(diff)
summed_abs_diff = abs_diff.sum() # numerically unstable, can result in inf
mean_abs_error = np.array(summed_abs_diff / diff.size, dtype=diff.dtype)
return mean_abs_error
```
Code to reproduce error:
```python
import chainer.functions as F
import numpy as np
a = np.full(shape=(64,1,16,16), fill_value=2, dtype=np.float16)
b = np.full(shape=(64,1,16,16), fill_value=-2, dtype=np.float16)
loss = F.mean_absolute_error(a,b)
# /home/user/.local/share/virtualenvs/.../lib/python3.6/site-packages/numpy/core/_methods.py:36: RuntimeWarning: overflow encountered in reduce
# return umr_sum(a, axis, dtype, out, keepdims, initial)
# variable(inf)
loss = F.mean_absolute_error(a.astype("float32"), b.astype("float32"))
# variable(4.)
```
Note that the actual loss (4) would still be valid in the float16 range, it is just that summing over many values results in an `inf`, which cannot then be divided to get a proper number.
Workaround ideas:
I've noticed the new `mixed16` mode that was implemented in #6456, and was wondering if there might be a similar way to do the intermediate calculations in `float32`, and cast the result back into `float16`? Thoughts?
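A rough NumPy sketch of that idea, accumulating in `float32` when the inputs are `float16` and casting the scalar result back (the function name is just for illustration):

```python
import numpy as np

def mean_absolute_error_stable(x0, x1):
    diff = (x0 - x1).ravel()
    # reduce in a wider dtype so the running sum stays in range for float16 inputs
    acc_dtype = np.float32 if diff.dtype == np.float16 else diff.dtype
    mean = np.abs(diff.astype(acc_dtype)).sum() / diff.size
    return np.asarray(mean, dtype=diff.dtype)
```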
System info:
```
Platform: Linux-4.15.0-46-generic-x86_64-with-debian-buster-sid
Chainer: 6.0.0b3
NumPy: 1.16.2
CuPy:
CuPy Version : 6.0.0b3
CUDA Root : /usr/local/cuda
CUDA Build Version : 10000
CUDA Driver Version : 10010
CUDA Runtime Version : 10000
cuDNN Build Version : 7402
cuDNN Version : 7402
NCCL Build Version : 2307
NCCL Runtime Version : 2307
iDeep: Not Available
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/loss/mean_absolute_error.py`
Content:
```
1 import numpy
2
3 import chainer
4 from chainer import backend
5 from chainer import function_node
6 from chainer.utils import type_check
7
8
9 class MeanAbsoluteError(function_node.FunctionNode):
10
11 """Mean absolute error function."""
12
13 def check_type_forward(self, in_types):
14 type_check._argname(in_types, ('x0', 'x1'))
15 type_check.expect(
16 in_types[0].dtype.kind == 'f',
17 in_types[0].dtype == in_types[1].dtype,
18 in_types[0].shape == in_types[1].shape
19 )
20
21 def forward_cpu(self, inputs):
22 x0, x1 = inputs
23 self.diff = x0 - x1
24 diff = self.diff.ravel()
25 return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),
26
27 def forward_gpu(self, inputs):
28 x0, x1 = inputs
29 self.diff = x0 - x1
30 diff = self.diff.ravel()
31 return abs(diff).sum() / diff.dtype.type(diff.size),
32
33 def backward(self, indexes, grad_outputs):
34 gy, = grad_outputs
35 coeff = gy * gy.data.dtype.type(1. / self.diff.size)
36 coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)
37 gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)
38 return gx0, -gx0
39
40
41 def mean_absolute_error(x0, x1):
42 """Mean absolute error function.
43
44 The function computes the mean absolute error between two variables. The
45 mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the
46 same dimensions. This function first calculates the absolute value
47 differences between the corresponding elements in x0 and x1, and then
48 returns the mean of those differences.
49
50 Args:
51 x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
52 x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.
53
54 Returns:
55 ~chainer.Variable:
56 A variable holding an array representing the mean absolute
57 error of two inputs.
58
59 .. admonition:: Example
60
61 1D array examples:
62
63 >>> x = np.array([1, 2, 3]).astype(np.float32)
64 >>> y = np.array([0, 0, 0]).astype(np.float32)
65 >>> F.mean_absolute_error(x, y)
66 variable(2.)
67 >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
68 >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)
69 >>> F.mean_absolute_error(x, y)
70 variable(6.)
71
72 2D array example:
73
74 In this example, there are 4 elements, and thus 4 errors
75 >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)
76 >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)
77 >>> F.mean_absolute_error(x, y)
78 variable(5.5)
79
80 3D array example:
81
82 In this example, there are 8 elements, and thus 8 errors
83 >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))
84 >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))
85 >>> x = x.astype(np.float32)
86 >>> y = y.astype(np.float32)
87 >>> F.mean_absolute_error(x, y)
88 variable(3.5)
89
90 """
91 return MeanAbsoluteError().apply((x0, x1))[0]
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/loss/mean_absolute_error.py b/chainer/functions/loss/mean_absolute_error.py
--- a/chainer/functions/loss/mean_absolute_error.py
+++ b/chainer/functions/loss/mean_absolute_error.py
@@ -6,6 +6,15 @@
from chainer.utils import type_check
+def _get_intermediate_dtype(dtype):
+ # Returns the dtype for intermediate calculation.
+ # For float16 input, float32 is used.
+ # Otherwise the same dtype as the parameter is used.
+ if dtype == numpy.float16:
+ return numpy.float32
+ return dtype
+
+
class MeanAbsoluteError(function_node.FunctionNode):
"""Mean absolute error function."""
@@ -21,14 +30,19 @@
def forward_cpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
- diff = self.diff.ravel()
- return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),
+ orig_dtype = self.diff.dtype
+ dtype = _get_intermediate_dtype(orig_dtype)
+ diff = self.diff.ravel().astype(dtype, copy=False)
+ return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),
def forward_gpu(self, inputs):
x0, x1 = inputs
self.diff = x0 - x1
- diff = self.diff.ravel()
- return abs(diff).sum() / diff.dtype.type(diff.size),
+ orig_dtype = self.diff.dtype
+ dtype = _get_intermediate_dtype(orig_dtype)
+ diff = self.diff.ravel().astype(dtype, copy=False)
+ return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(
+ orig_dtype, copy=False),
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
| {"golden_diff": "diff --git a/chainer/functions/loss/mean_absolute_error.py b/chainer/functions/loss/mean_absolute_error.py\n--- a/chainer/functions/loss/mean_absolute_error.py\n+++ b/chainer/functions/loss/mean_absolute_error.py\n@@ -6,6 +6,15 @@\n from chainer.utils import type_check\n \n \n+def _get_intermediate_dtype(dtype):\n+ # Returns the dtype for intermediate calculation.\n+ # For float16 input, float32 is used.\n+ # Otherwise the same dtype as the parameter is used.\n+ if dtype == numpy.float16:\n+ return numpy.float32\n+ return dtype\n+\n+\n class MeanAbsoluteError(function_node.FunctionNode):\n \n \"\"\"Mean absolute error function.\"\"\"\n@@ -21,14 +30,19 @@\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n- diff = self.diff.ravel()\n- return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),\n+ orig_dtype = self.diff.dtype\n+ dtype = _get_intermediate_dtype(orig_dtype)\n+ diff = self.diff.ravel().astype(dtype, copy=False)\n+ return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),\n \n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n- diff = self.diff.ravel()\n- return abs(diff).sum() / diff.dtype.type(diff.size),\n+ orig_dtype = self.diff.dtype\n+ dtype = _get_intermediate_dtype(orig_dtype)\n+ diff = self.diff.ravel().astype(dtype, copy=False)\n+ return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(\n+ orig_dtype, copy=False),\n \n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n", "issue": "F.mean_absolute_error numerically unstable with float16 arrays\nIn #5053, float16 support has been enabled for [F.mean_absolute_error](https://github.com/chainer/chainer/blob/master/chainer/functions/loss/mean_absolute_error.py), but it seems to produce NaN values quite easily. Usually this happens when using big batch sizes and/or if the absolute error difference is large.\r\n\r\nThe calculation is done by summing over all the absolute differences, and then dividing by the number of elements in the array. However, it appears that the summing can produce large numbers outside the possible range for `float16`. 
The current implementation can be broken down as follows:\r\n\r\n```python\r\ndef forward_cpu(self, inputs):\r\n x0, x1 = inputs\r\n self.diff = x0 - x1\r\n diff = self.diff.ravel()\r\n abs_diff = abs(diff)\r\n summed_abs_diff = abs_diff.sum() # numerically unstable, can result in inf\r\n mean_abs_error = np.array(summed_abs_diff / diff.size, dtype=diff.dtype)\r\n return mean_abs_error\r\n```\r\n\r\nCode to reproduce error:\r\n\r\n```python\r\nimport chainer.functions as F\r\nimport numpy as np\r\n\r\na = np.full(shape=(64,1,16,16), fill_value=2, dtype=np.float16)\r\nb = np.full(shape=(64,1,16,16), fill_value=-2, dtype=np.float16)\r\n\r\nloss = F.mean_absolute_error(a,b)\r\n# /home/user/.local/share/virtualenvs/.../lib/python3.6/site-packages/numpy/core/_methods.py:36: RuntimeWarning: overflow encountered in reduce\r\n# return umr_sum(a, axis, dtype, out, keepdims, initial)\r\n# variable(inf)\r\n\r\nloss = F.mean_absolute_error(a.astype(\"float32\"), b.astype(\"float32\"))\r\n# variable(4.)\r\n```\r\n\r\nNote that the actual loss (4) would still be valid in the float16 range, it is just that summing over many values results in an `inf`, which cannot then be divided to get a proper number.\r\n\r\nWorkaround ideas:\r\n\r\nI've noticed the new `mixed16` mode that was implemented in #6456, and was wondering if there might be a similar way to do the intermediate calculations in `float32`, and cast the result back into `float16`? Thoughts?\r\n\r\nSystem info:\r\n```\r\nPlatform: Linux-4.15.0-46-generic-x86_64-with-debian-buster-sid\r\nChainer: 6.0.0b3\r\nNumPy: 1.16.2\r\nCuPy:\r\n CuPy Version : 6.0.0b3\r\n CUDA Root : /usr/local/cuda\r\n CUDA Build Version : 10000\r\n CUDA Driver Version : 10010\r\n CUDA Runtime Version : 10000\r\n cuDNN Build Version : 7402\r\n cuDNN Version : 7402\r\n NCCL Build Version : 2307\r\n NCCL Runtime Version : 2307\r\niDeep: Not Available\r\n```\n", "before_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass MeanAbsoluteError(function_node.FunctionNode):\n\n \"\"\"Mean absolute error function.\"\"\"\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x0', 'x1'))\n type_check.expect(\n in_types[0].dtype.kind == 'f',\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n diff = self.diff.ravel()\n return numpy.array(abs(diff).sum() / diff.size, dtype=diff.dtype),\n\n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n diff = self.diff.ravel()\n return abs(diff).sum() / diff.dtype.type(diff.size),\n\n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n coeff = gy * gy.data.dtype.type(1. / self.diff.size)\n coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)\n gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)\n return gx0, -gx0\n\n\ndef mean_absolute_error(x0, x1):\n \"\"\"Mean absolute error function.\n\n The function computes the mean absolute error between two variables. The\n mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the\n same dimensions. 
This function first calculates the absolute value\n differences between the corresponding elements in x0 and x1, and then\n returns the mean of those differences.\n\n Args:\n x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n\n Returns:\n ~chainer.Variable:\n A variable holding an array representing the mean absolute\n error of two inputs.\n\n .. admonition:: Example\n\n 1D array examples:\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> y = np.array([0, 0, 0]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(2.)\n >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(6.)\n\n 2D array example:\n\n In this example, there are 4 elements, and thus 4 errors\n >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)\n >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(5.5)\n\n 3D array example:\n\n In this example, there are 8 elements, and thus 8 errors\n >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))\n >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))\n >>> x = x.astype(np.float32)\n >>> y = y.astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(3.5)\n\n \"\"\"\n return MeanAbsoluteError().apply((x0, x1))[0]\n", "path": "chainer/functions/loss/mean_absolute_error.py"}], "after_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\ndef _get_intermediate_dtype(dtype):\n # Returns the dtype for intermediate calculation.\n # For float16 input, float32 is used.\n # Otherwise the same dtype as the parameter is used.\n if dtype == numpy.float16:\n return numpy.float32\n return dtype\n\n\nclass MeanAbsoluteError(function_node.FunctionNode):\n\n \"\"\"Mean absolute error function.\"\"\"\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x0', 'x1'))\n type_check.expect(\n in_types[0].dtype.kind == 'f',\n in_types[0].dtype == in_types[1].dtype,\n in_types[0].shape == in_types[1].shape\n )\n\n def forward_cpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n orig_dtype = self.diff.dtype\n dtype = _get_intermediate_dtype(orig_dtype)\n diff = self.diff.ravel().astype(dtype, copy=False)\n return numpy.array(abs(diff).sum() / diff.size, dtype=orig_dtype),\n\n def forward_gpu(self, inputs):\n x0, x1 = inputs\n self.diff = x0 - x1\n orig_dtype = self.diff.dtype\n dtype = _get_intermediate_dtype(orig_dtype)\n diff = self.diff.ravel().astype(dtype, copy=False)\n return (abs(diff).sum() / diff.dtype.type(diff.size)).astype(\n orig_dtype, copy=False),\n\n def backward(self, indexes, grad_outputs):\n gy, = grad_outputs\n coeff = gy * gy.data.dtype.type(1. / self.diff.size)\n coeff = chainer.functions.broadcast_to(coeff, self.diff.shape)\n gx0 = coeff * backend.get_array_module(gy.data).sign(self.diff)\n return gx0, -gx0\n\n\ndef mean_absolute_error(x0, x1):\n \"\"\"Mean absolute error function.\n\n The function computes the mean absolute error between two variables. The\n mean is taken over the minibatch. Args ``x0`` and ``x1`` must have the\n same dimensions. 
This function first calculates the absolute value\n differences between the corresponding elements in x0 and x1, and then\n returns the mean of those differences.\n\n Args:\n x0 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n x1 (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.\n\n Returns:\n ~chainer.Variable:\n A variable holding an array representing the mean absolute\n error of two inputs.\n\n .. admonition:: Example\n\n 1D array examples:\n\n >>> x = np.array([1, 2, 3]).astype(np.float32)\n >>> y = np.array([0, 0, 0]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(2.)\n >>> x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n >>> y = np.array([7, 8, 9, 10, 11, 12]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(6.)\n\n 2D array example:\n\n In this example, there are 4 elements, and thus 4 errors\n >>> x = np.array([[1, 2], [3, 4]]).astype(np.float32)\n >>> y = np.array([[8, 8], [8, 8]]).astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(5.5)\n\n 3D array example:\n\n In this example, there are 8 elements, and thus 8 errors\n >>> x = np.reshape(np.array([1, 2, 3, 4, 5, 6, 7, 8]), (2, 2, 2))\n >>> y = np.reshape(np.array([8, 8, 8, 8, 8, 8, 8, 8]), (2, 2, 2))\n >>> x = x.astype(np.float32)\n >>> y = y.astype(np.float32)\n >>> F.mean_absolute_error(x, y)\n variable(3.5)\n\n \"\"\"\n return MeanAbsoluteError().apply((x0, x1))[0]\n", "path": "chainer/functions/loss/mean_absolute_error.py"}]} | 2,017 | 413 |
gh_patches_debug_32491 | rasdani/github-patches | git_diff | openai__gym-1573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support wrapper transformations to VecEnv
@tristandeleu @pzhokhov @christopherhesse It might be rather convenient for the user experience to provide a list of wrapper transformations for the atomic environments when creating a vectorized environment, e.g.
```python
transforms = [AtariPreprocessing, SignReward, ...]
env = gym.vector.make('Pong-v0', 16, True, transforms=transforms)
```
For additional arguments, the user is required to use `partial()` to define them within the transform list, so that each internal environment is wrapped according to the transformation list.
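A minimal sketch of how such a `transforms`/`wrappers` argument could be threaded through the per-environment factory; the keyword name and helper follow this proposal, not an existing gym API:

```python
from functools import partial
import gym

def make_env_fn(env_id, wrappers=()):
    def _make():
        env = gym.make(env_id)
        for wrapper in wrappers:  # each entry: a callable taking an env and returning an env
            env = wrapper(env)
        return env
    return _make

# extra arguments go through partial(), as proposed above:
env_fns = [make_env_fn('CartPole-v1',
                       wrappers=[partial(gym.wrappers.TimeLimit, max_episode_steps=100)])
           for _ in range(4)]
```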
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/vector/__init__.py`
Content:
```
1 from gym.vector.async_vector_env import AsyncVectorEnv
2 from gym.vector.sync_vector_env import SyncVectorEnv
3 from gym.vector.vector_env import VectorEnv
4
5 __all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']
6
7 def make(id, num_envs=1, asynchronous=True, **kwargs):
8 """Create a vectorized environment from multiple copies of an environment,
9 from its id
10
11 Parameters
12 ----------
13 id : str
14 The environment ID. This must be a valid ID from the registry.
15
16 num_envs : int
17 Number of copies of the environment.
18
19 asynchronous : bool (default: `True`)
20 If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
21 `multiprocessing` to run the environments in parallel). If `False`,
22 wraps the environments in a `SyncVectorEnv`.
23
24 Returns
25 -------
26 env : `gym.vector.VectorEnv` instance
27 The vectorized environment.
28
29 Example
30 -------
31 >>> import gym
32 >>> env = gym.vector.make('CartPole-v1', 3)
33 >>> env.reset()
34 array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],
35 [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],
36 [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],
37 dtype=float32)
38 """
39 from gym.envs import make as make_
40 def _make_env():
41 return make_(id, **kwargs)
42 env_fns = [_make_env for _ in range(num_envs)]
43 return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/vector/__init__.py b/gym/vector/__init__.py
--- a/gym/vector/__init__.py
+++ b/gym/vector/__init__.py
@@ -1,10 +1,15 @@
+try:
+ from collections.abc import Iterable
+except ImportError:
+ Iterable = (tuple, list)
+
from gym.vector.async_vector_env import AsyncVectorEnv
from gym.vector.sync_vector_env import SyncVectorEnv
from gym.vector.vector_env import VectorEnv
__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']
-def make(id, num_envs=1, asynchronous=True, **kwargs):
+def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):
"""Create a vectorized environment from multiple copies of an environment,
from its id
@@ -20,6 +25,10 @@
If `True`, wraps the environments in an `AsyncVectorEnv` (which uses
`multiprocessing` to run the environments in parallel). If `False`,
wraps the environments in a `SyncVectorEnv`.
+
+ wrappers : Callable or Iterable of Callables (default: `None`)
+ If not `None`, then apply the wrappers to each internal
+ environment during creation.
Returns
-------
@@ -38,6 +47,15 @@
"""
from gym.envs import make as make_
def _make_env():
- return make_(id, **kwargs)
+ env = make_(id, **kwargs)
+ if wrappers is not None:
+ if callable(wrappers):
+ env = wrappers(env)
+ elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):
+ for wrapper in wrappers:
+ env = wrapper(env)
+ else:
+ raise NotImplementedError
+ return env
env_fns = [_make_env for _ in range(num_envs)]
return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)
| {"golden_diff": "diff --git a/gym/vector/__init__.py b/gym/vector/__init__.py\n--- a/gym/vector/__init__.py\n+++ b/gym/vector/__init__.py\n@@ -1,10 +1,15 @@\n+try:\n+ from collections.abc import Iterable\n+except ImportError:\n+ Iterable = (tuple, list)\n+\n from gym.vector.async_vector_env import AsyncVectorEnv\n from gym.vector.sync_vector_env import SyncVectorEnv\n from gym.vector.vector_env import VectorEnv\n \n __all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n \n-def make(id, num_envs=1, asynchronous=True, **kwargs):\n+def make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n \n@@ -20,6 +25,10 @@\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). If `False`,\n wraps the environments in a `SyncVectorEnv`.\n+ \n+ wrappers : Callable or Iterable of Callables (default: `None`)\n+ If not `None`, then apply the wrappers to each internal \n+ environment during creation. \n \n Returns\n -------\n@@ -38,6 +47,15 @@\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n- return make_(id, **kwargs)\n+ env = make_(id, **kwargs)\n+ if wrappers is not None:\n+ if callable(wrappers):\n+ env = wrappers(env)\n+ elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):\n+ for wrapper in wrappers:\n+ env = wrapper(env)\n+ else:\n+ raise NotImplementedError\n+ return env\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "issue": "Support wrapper transformations to VecEnv\n@tristandeleu @pzhokhov @christopherhesse It might be rather convenient for the user experience to provide a list of wrapper transformations for the atomic environments when creating vectorized environment, e.g.\r\n\r\n```python\r\ntransforms = [AtariPreprocessing, SignReward, ...]\r\nenv = gym.vector.make('Pong-v0', 16, True, transforms=transforms)\r\n```\r\nFor additional arguments, the user is required to use `partial()` to define them within the transform list. So that each internal environment is wrapped according to the transformation list. \n", "before_files": [{"content": "from gym.vector.async_vector_env import AsyncVectorEnv\nfrom gym.vector.sync_vector_env import SyncVectorEnv\nfrom gym.vector.vector_env import VectorEnv\n\n__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n\ndef make(id, num_envs=1, asynchronous=True, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n\n Parameters\n ----------\n id : str\n The environment ID. This must be a valid ID from the registry.\n\n num_envs : int\n Number of copies of the environment. \n\n asynchronous : bool (default: `True`)\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). 
If `False`,\n wraps the environments in a `SyncVectorEnv`.\n\n Returns\n -------\n env : `gym.vector.VectorEnv` instance\n The vectorized environment.\n\n Example\n -------\n >>> import gym\n >>> env = gym.vector.make('CartPole-v1', 3)\n >>> env.reset()\n array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],\n [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],\n [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],\n dtype=float32)\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n return make_(id, **kwargs)\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "path": "gym/vector/__init__.py"}], "after_files": [{"content": "try:\n from collections.abc import Iterable\nexcept ImportError:\n Iterable = (tuple, list)\n\nfrom gym.vector.async_vector_env import AsyncVectorEnv\nfrom gym.vector.sync_vector_env import SyncVectorEnv\nfrom gym.vector.vector_env import VectorEnv\n\n__all__ = ['AsyncVectorEnv', 'SyncVectorEnv', 'VectorEnv', 'make']\n\ndef make(id, num_envs=1, asynchronous=True, wrappers=None, **kwargs):\n \"\"\"Create a vectorized environment from multiple copies of an environment,\n from its id\n\n Parameters\n ----------\n id : str\n The environment ID. This must be a valid ID from the registry.\n\n num_envs : int\n Number of copies of the environment. \n\n asynchronous : bool (default: `True`)\n If `True`, wraps the environments in an `AsyncVectorEnv` (which uses \n `multiprocessing` to run the environments in parallel). If `False`,\n wraps the environments in a `SyncVectorEnv`.\n \n wrappers : Callable or Iterable of Callables (default: `None`)\n If not `None`, then apply the wrappers to each internal \n environment during creation. \n\n Returns\n -------\n env : `gym.vector.VectorEnv` instance\n The vectorized environment.\n\n Example\n -------\n >>> import gym\n >>> env = gym.vector.make('CartPole-v1', 3)\n >>> env.reset()\n array([[-0.04456399, 0.04653909, 0.01326909, -0.02099827],\n [ 0.03073904, 0.00145001, -0.03088818, -0.03131252],\n [ 0.03468829, 0.01500225, 0.01230312, 0.01825218]],\n dtype=float32)\n \"\"\"\n from gym.envs import make as make_\n def _make_env():\n env = make_(id, **kwargs)\n if wrappers is not None:\n if callable(wrappers):\n env = wrappers(env)\n elif isinstance(wrappers, Iterable) and all([callable(w) for w in wrappers]):\n for wrapper in wrappers:\n env = wrapper(env)\n else:\n raise NotImplementedError\n return env\n env_fns = [_make_env for _ in range(num_envs)]\n return AsyncVectorEnv(env_fns) if asynchronous else SyncVectorEnv(env_fns)\n", "path": "gym/vector/__init__.py"}]} | 938 | 449 |
gh_patches_debug_24975 | rasdani/github-patches | git_diff | streamlit__streamlit-7033 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using help param causes use_container_width to be ignored with st.button for version 1.18
### Checklist
- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Using both the `help` and `use_container_width` parameters with `st.button` in version 1.18 results in `use_container_width` being ignored.
### Reproducible Code Example
[](https://issues.streamlitapp.com/?issue=gh-6161)
```Python
import streamlit as st
c1, c2, c3 = st.columns([1, 1, 1])
with c1:
st.button('button 1', use_container_width=True)
with c2:
st.button('button 2', use_container_width=True)
with c3:
st.button('button 3', use_container_width=True, help = 'example')
st.button("test", use_container_width=True, help='test')
```
### Steps To Reproduce
Run an app that uses the `help` and `use_container_width` parameters of `st.button` with version 1.18
### Expected Behavior
Expected behavior is that `use_container_width` affects the width of the button widget
### Current Behavior
Current behavior:
<img width="631" alt="Screenshot 2023-02-21 at 11 48 14 AM" src="https://user-images.githubusercontent.com/16749069/220443951-e1ee3abc-0210-4a04-85b4-85b07ade9cc9.png">
`use_container_width` is ignored
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.18.0
- Python version:
- Operating System:
- Browser:
- Virtual environment:
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_button.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16 from streamlit import runtime
17
18 # st.session_state can only be used in streamlit
19 if runtime.exists():
20
21 def on_click(x, y):
22 if "click_count" not in st.session_state:
23 st.session_state.click_count = 0
24
25 st.session_state.click_count += 1
26 st.session_state.x = x
27 st.session_state.y = y
28
29 i1 = st.button(
30 "button 1", key="button", on_click=on_click, args=(1,), kwargs={"y": 2}
31 )
32 st.write("value:", i1)
33 st.write("value from state:", st.session_state["button"])
34
35 button_was_clicked = "click_count" in st.session_state
36 st.write("Button was clicked:", button_was_clicked)
37
38 if button_was_clicked:
39 st.write("times clicked:", st.session_state.click_count)
40 st.write("arg value:", st.session_state.x)
41 st.write("kwarg value:", st.session_state.y)
42
43 i2 = st.checkbox("reset button return value")
44
45 i3 = st.button("button 2", disabled=True)
46 st.write("value 2:", i3)
47
48 i4 = st.button("button 3", type="primary")
49 st.write("value 3:", i4)
50
51 i5 = st.button("button 4", type="primary", disabled=True)
52 st.write("value 4:", i5)
53
54 st.button("button 5", use_container_width=True)
55
56 cols = st.columns(3)
57
58 # Order of conn_types matters to preserve the order in st_button.spec.js and the snapshot
59 conn_types = [
60 "snowflake",
61 "bigquery",
62 "huggingface",
63 "aws_s3",
64 "http_file",
65 "postgresql",
66 "gsheets",
67 "custom",
68 ]
69 for i in range(len(conn_types)):
70 cols[i % 3].button(conn_types[i], use_container_width=True)
71
```
Path: `e2e/scripts/st_form_use_container_width_submit_button.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 with st.form("my_form"):
18 st.write("Inside the form")
19 slider_val = st.slider("Form slider")
20 checkbox_val = st.checkbox("Form checkbox")
21 submitted = st.form_submit_button("Submit", use_container_width=True)
22 if submitted:
23 st.write("slider", slider_val, "checkbox", checkbox_val)
24
```
Path: `e2e/scripts/st_download_button.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.download_button(
18 "Download button label",
19 data="Hello world!",
20 file_name="hello.txt",
21 )
22
23 st.download_button(
24 "Download button label",
25 data="Hello world!",
26 file_name="hello.txt",
27 key="disabled_dl_button",
28 disabled=True,
29 )
30
31 st.download_button(
32 "Download RAR archive file",
33 data=b"bytes",
34 file_name="archive.rar",
35 mime="application/vnd.rar",
36 )
37
38 st.download_button(
39 "Download button with use_container_width=True",
40 data="Hello world!",
41 file_name="hello.txt",
42 use_container_width=True,
43 )
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_button.py b/e2e/scripts/st_button.py
--- a/e2e/scripts/st_button.py
+++ b/e2e/scripts/st_button.py
@@ -51,7 +51,11 @@
i5 = st.button("button 4", type="primary", disabled=True)
st.write("value 4:", i5)
-st.button("button 5", use_container_width=True)
+st.button("button 5 - containerWidth", use_container_width=True)
+
+st.button(
+ "button 6 - containerWidth + help", use_container_width=True, help="help text"
+)
cols = st.columns(3)
diff --git a/e2e/scripts/st_download_button.py b/e2e/scripts/st_download_button.py
--- a/e2e/scripts/st_download_button.py
+++ b/e2e/scripts/st_download_button.py
@@ -41,3 +41,11 @@
file_name="hello.txt",
use_container_width=True,
)
+
+st.download_button(
+ "Download button with help text and use_container_width=True",
+ data="Hello world!",
+ file_name="hello.txt",
+ use_container_width=True,
+ help="Example help text",
+)
diff --git a/e2e/scripts/st_form_use_container_width_submit_button.py b/e2e/scripts/st_form_use_container_width_submit_button.py
--- a/e2e/scripts/st_form_use_container_width_submit_button.py
+++ b/e2e/scripts/st_form_use_container_width_submit_button.py
@@ -21,3 +21,13 @@
submitted = st.form_submit_button("Submit", use_container_width=True)
if submitted:
st.write("slider", slider_val, "checkbox", checkbox_val)
+
+with st.form("my_form_2"):
+ st.write("Inside the second form")
+ slider_val = st.slider("Form slider 2")
+ checkbox_val = st.checkbox("Form checkbox 2")
+ submitted = st.form_submit_button(
+ "Submit", help="Submit by clicking", use_container_width=True
+ )
+ if submitted:
+ st.write("slider 2:", slider_val, "checkbox 2:", checkbox_val)
| {"golden_diff": "diff --git a/e2e/scripts/st_button.py b/e2e/scripts/st_button.py\n--- a/e2e/scripts/st_button.py\n+++ b/e2e/scripts/st_button.py\n@@ -51,7 +51,11 @@\n i5 = st.button(\"button 4\", type=\"primary\", disabled=True)\n st.write(\"value 4:\", i5)\n \n-st.button(\"button 5\", use_container_width=True)\n+st.button(\"button 5 - containerWidth\", use_container_width=True)\n+\n+st.button(\n+ \"button 6 - containerWidth + help\", use_container_width=True, help=\"help text\"\n+)\n \n cols = st.columns(3)\n \ndiff --git a/e2e/scripts/st_download_button.py b/e2e/scripts/st_download_button.py\n--- a/e2e/scripts/st_download_button.py\n+++ b/e2e/scripts/st_download_button.py\n@@ -41,3 +41,11 @@\n file_name=\"hello.txt\",\n use_container_width=True,\n )\n+\n+st.download_button(\n+ \"Download button with help text and use_container_width=True\",\n+ data=\"Hello world!\",\n+ file_name=\"hello.txt\",\n+ use_container_width=True,\n+ help=\"Example help text\",\n+)\ndiff --git a/e2e/scripts/st_form_use_container_width_submit_button.py b/e2e/scripts/st_form_use_container_width_submit_button.py\n--- a/e2e/scripts/st_form_use_container_width_submit_button.py\n+++ b/e2e/scripts/st_form_use_container_width_submit_button.py\n@@ -21,3 +21,13 @@\n submitted = st.form_submit_button(\"Submit\", use_container_width=True)\n if submitted:\n st.write(\"slider\", slider_val, \"checkbox\", checkbox_val)\n+\n+with st.form(\"my_form_2\"):\n+ st.write(\"Inside the second form\")\n+ slider_val = st.slider(\"Form slider 2\")\n+ checkbox_val = st.checkbox(\"Form checkbox 2\")\n+ submitted = st.form_submit_button(\n+ \"Submit\", help=\"Submit by clicking\", use_container_width=True\n+ )\n+ if submitted:\n+ st.write(\"slider 2:\", slider_val, \"checkbox 2:\", checkbox_val)\n", "issue": "Using help param causes use_container_width to be ignored with st.button for version 1.18\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nUsing both `help` and `use_container_width` parameters with `st.button` with version 1.18 results in `use_container_width` being ignored\r\n\r\n### Reproducible Code Example\r\n\r\n[](https://issues.streamlitapp.com/?issue=gh-6161)\r\n\r\n```Python\r\nimport streamlit as st\r\nc1, c2, c3 = st.columns([1, 1, 1])\r\n\r\nwith c1:\r\n st.button('button 1', use_container_width=True)\r\nwith c2:\r\n st.button('button 2', use_container_width=True)\r\nwith c3:\r\n st.button('button 3', use_container_width=True, help = 'example')\r\nst.button(\"test\", use_container_width=True, help='test')\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nRun app that uses `help` and `use_container_width` parameters for `st.button` with version 1.18\r\n\r\n### Expected Behavior\r\n\r\nExpected behavior is that `use_container_width` impacts width of button widget\r\n\r\n### Current Behavior\r\n\r\nCurrent behavior: \r\n<img width=\"631\" alt=\"Screenshot 2023-02-21 at 11 48 14 AM\" src=\"https://user-images.githubusercontent.com/16749069/220443951-e1ee3abc-0210-4a04-85b4-85b07ade9cc9.png\">\r\n\r\n`use_container_width` is ignored\r\n\r\n### Is this a regression?\r\n\r\n- [X] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.18.0\r\n- Python version:\r\n- Operating System:\r\n- Browser:\r\n- Virtual environment:\r\n\r\n\r\n### 
Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\n# st.session_state can only be used in streamlit\nif runtime.exists():\n\n def on_click(x, y):\n if \"click_count\" not in st.session_state:\n st.session_state.click_count = 0\n\n st.session_state.click_count += 1\n st.session_state.x = x\n st.session_state.y = y\n\n i1 = st.button(\n \"button 1\", key=\"button\", on_click=on_click, args=(1,), kwargs={\"y\": 2}\n )\n st.write(\"value:\", i1)\n st.write(\"value from state:\", st.session_state[\"button\"])\n\n button_was_clicked = \"click_count\" in st.session_state\n st.write(\"Button was clicked:\", button_was_clicked)\n\n if button_was_clicked:\n st.write(\"times clicked:\", st.session_state.click_count)\n st.write(\"arg value:\", st.session_state.x)\n st.write(\"kwarg value:\", st.session_state.y)\n\ni2 = st.checkbox(\"reset button return value\")\n\ni3 = st.button(\"button 2\", disabled=True)\nst.write(\"value 2:\", i3)\n\ni4 = st.button(\"button 3\", type=\"primary\")\nst.write(\"value 3:\", i4)\n\ni5 = st.button(\"button 4\", type=\"primary\", disabled=True)\nst.write(\"value 4:\", i5)\n\nst.button(\"button 5\", use_container_width=True)\n\ncols = st.columns(3)\n\n# Order of conn_types matters to preserve the order in st_button.spec.js and the snapshot\nconn_types = [\n \"snowflake\",\n \"bigquery\",\n \"huggingface\",\n \"aws_s3\",\n \"http_file\",\n \"postgresql\",\n \"gsheets\",\n \"custom\",\n]\nfor i in range(len(conn_types)):\n cols[i % 3].button(conn_types[i], use_container_width=True)\n", "path": "e2e/scripts/st_button.py"}, {"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nwith st.form(\"my_form\"):\n st.write(\"Inside the form\")\n slider_val = st.slider(\"Form slider\")\n checkbox_val = st.checkbox(\"Form checkbox\")\n submitted = st.form_submit_button(\"Submit\", use_container_width=True)\n if submitted:\n st.write(\"slider\", slider_val, \"checkbox\", checkbox_val)\n", "path": "e2e/scripts/st_form_use_container_width_submit_button.py"}, {"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.download_button(\n \"Download button label\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n)\n\nst.download_button(\n \"Download button label\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n key=\"disabled_dl_button\",\n disabled=True,\n)\n\nst.download_button(\n \"Download RAR archive file\",\n data=b\"bytes\",\n file_name=\"archive.rar\",\n mime=\"application/vnd.rar\",\n)\n\nst.download_button(\n \"Download button with use_container_width=True\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n use_container_width=True,\n)\n", "path": "e2e/scripts/st_download_button.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\n# st.session_state can only be used in streamlit\nif runtime.exists():\n\n def on_click(x, y):\n if \"click_count\" not in st.session_state:\n st.session_state.click_count = 0\n\n st.session_state.click_count += 1\n st.session_state.x = x\n st.session_state.y = y\n\n i1 = st.button(\n \"button 1\", key=\"button\", on_click=on_click, args=(1,), kwargs={\"y\": 2}\n )\n st.write(\"value:\", i1)\n st.write(\"value from state:\", st.session_state[\"button\"])\n\n button_was_clicked = \"click_count\" in st.session_state\n st.write(\"Button was clicked:\", button_was_clicked)\n\n if button_was_clicked:\n st.write(\"times clicked:\", st.session_state.click_count)\n st.write(\"arg value:\", st.session_state.x)\n st.write(\"kwarg value:\", st.session_state.y)\n\ni2 = st.checkbox(\"reset button return value\")\n\ni3 = st.button(\"button 2\", disabled=True)\nst.write(\"value 2:\", i3)\n\ni4 = st.button(\"button 3\", type=\"primary\")\nst.write(\"value 3:\", i4)\n\ni5 = st.button(\"button 4\", type=\"primary\", disabled=True)\nst.write(\"value 4:\", i5)\n\nst.button(\"button 5 - containerWidth\", use_container_width=True)\n\nst.button(\n \"button 6 - containerWidth + help\", use_container_width=True, help=\"help text\"\n)\n\ncols = st.columns(3)\n\n# Order of conn_types matters to preserve the order in st_button.spec.js and the snapshot\nconn_types = [\n \"snowflake\",\n \"bigquery\",\n \"huggingface\",\n \"aws_s3\",\n \"http_file\",\n \"postgresql\",\n \"gsheets\",\n \"custom\",\n]\nfor i in range(len(conn_types)):\n cols[i % 3].button(conn_types[i], use_container_width=True)\n", "path": "e2e/scripts/st_button.py"}, 
{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nwith st.form(\"my_form\"):\n st.write(\"Inside the form\")\n slider_val = st.slider(\"Form slider\")\n checkbox_val = st.checkbox(\"Form checkbox\")\n submitted = st.form_submit_button(\"Submit\", use_container_width=True)\n if submitted:\n st.write(\"slider\", slider_val, \"checkbox\", checkbox_val)\n\nwith st.form(\"my_form_2\"):\n st.write(\"Inside the second form\")\n slider_val = st.slider(\"Form slider 2\")\n checkbox_val = st.checkbox(\"Form checkbox 2\")\n submitted = st.form_submit_button(\n \"Submit\", help=\"Submit by clicking\", use_container_width=True\n )\n if submitted:\n st.write(\"slider 2:\", slider_val, \"checkbox 2:\", checkbox_val)\n", "path": "e2e/scripts/st_form_use_container_width_submit_button.py"}, {"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.download_button(\n \"Download button label\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n)\n\nst.download_button(\n \"Download button label\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n key=\"disabled_dl_button\",\n disabled=True,\n)\n\nst.download_button(\n \"Download RAR archive file\",\n data=b\"bytes\",\n file_name=\"archive.rar\",\n mime=\"application/vnd.rar\",\n)\n\nst.download_button(\n \"Download button with use_container_width=True\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n use_container_width=True,\n)\n\nst.download_button(\n \"Download button with help text and use_container_width=True\",\n data=\"Hello world!\",\n file_name=\"hello.txt\",\n use_container_width=True,\n help=\"Example help text\",\n)\n", "path": "e2e/scripts/st_download_button.py"}]} | 2,151 | 477 |
gh_patches_debug_17234 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TabularForecastingData must include predict_data_frame
## 📚 Documentation
The [NBEATS tutorial](https://lightning-flash.readthedocs.io/en/stable/notebooks/flash_tutorials/electricity_forecasting.html) uses `0.5.x`, which references `model.predict()` at the end to generate predictions. In `0.6.0`, `model.predict()` was deprecated in favor of `trainer.predict(model, datamodule=datamodule)`.
If you try to do this by passing the `datamodule` created via:
```python
datamodule = TabularForecastingData.from_data_frame(
time_idx="time_idx",
target="price actual",
group_ids=["constant"],
max_encoder_length=max_encoder_length,
max_prediction_length=max_prediction_length,
time_varying_unknown_reals=["price actual"],
train_data_frame=df_energy_daily[df_energy_daily["time_idx"] <= training_cutoff],
val_data_frame=df_energy_daily,
batch_size=256,
)
```
to `trainer.predict(...)`, you'll get the following error:
```python
MisconfigurationException: No `predict_dataloader()` method defined to run `Trainer.predict`.
```
The solution is to be found [here](https://lightning-flash.readthedocs.io/en/stable/reference/tabular_forecasting.html), which clearly shows how to make the prediction datamodule:
```python
# 4. Generate predictions
datamodule = TabularForecastingData.from_data_frame(predict_data_frame=data, parameters=datamodule.parameters)
predictions = trainer.predict(model, datamodule=datamodule)
```
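
Putting the two pieces together, a minimal sketch for 0.6.0 (reusing `df_energy_daily`, `training_cutoff`, `max_encoder_length`, the fitted `model`, `trainer`, and the training `datamodule` from the tutorial; the exact slice passed as `predict_data_frame` is an assumption — it just needs at least `max_encoder_length` rows of history per group):

```python
from flash.tabular.forecasting import TabularForecastingData

# Inference requires a *second* datamodule that reuses the fitted parameters
# and supplies predict_data_frame (train/val frames alone are not enough).
predict_datamodule = TabularForecastingData.from_data_frame(
    predict_data_frame=df_energy_daily[
        df_energy_daily["time_idx"] > training_cutoff - max_encoder_length
    ],
    parameters=datamodule.parameters,
)
predictions = trainer.predict(model, datamodule=predict_datamodule)
```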
__Suggestion:__
* Update the tutorial to use `0.6.0`
* Add a small section to the [API docs](https://lightning-flash.readthedocs.io/en/stable/api/generated/flash.tabular.forecasting.data.TabularForecastingData.html#flash.tabular.forecasting.data.TabularForecastingData.from_data_frame) explaining that `predict_data_frame` must be specified in order to make use of `trainer.predict(...)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 import glob
14 import os
15 import shutil
16 import sys
17 import warnings
18 from importlib.util import module_from_spec, spec_from_file_location
19
20 import pt_lightning_sphinx_theme
21
22 _PATH_HERE = os.path.abspath(os.path.dirname(__file__))
23 _PATH_ROOT = os.path.join(_PATH_HERE, "..", "..")
24 _PATH_RAW_NB = os.path.join(_PATH_ROOT, "_notebooks")
25 sys.path.insert(0, os.path.abspath(_PATH_ROOT))
26 sys.path.append(os.path.join(_PATH_RAW_NB, ".actions"))
27
28 _SHOULD_COPY_NOTEBOOKS = True
29
30 try:
31 from helpers import HelperCLI
32 except Exception:
33 _SHOULD_COPY_NOTEBOOKS = False
34 warnings.warn("To build the code, please run: `git submodule update --init --recursive`", stacklevel=2)
35
36
37 def _load_py_module(fname, pkg="flash"):
38 spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_ROOT, pkg, fname))
39 py = module_from_spec(spec)
40 spec.loader.exec_module(py)
41 return py
42
43
44 try:
45 from flash import __about__ as about
46 from flash.core.utilities import providers
47
48 except ModuleNotFoundError:
49
50 about = _load_py_module("__about__.py")
51 providers = _load_py_module("core/utilities/providers.py")
52
53 SPHINX_MOCK_REQUIREMENTS = int(os.environ.get("SPHINX_MOCK_REQUIREMENTS", True))
54
55 html_favicon = "_static/images/icon.svg"
56
57 # -- Project information -----------------------------------------------------
58
59 project = "Flash"
60 copyright = "2020-2021, PyTorch Lightning"
61 author = "PyTorch Lightning"
62
63 # -- Project documents -------------------------------------------------------
64 if _SHOULD_COPY_NOTEBOOKS:
65 HelperCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, "notebooks", patterns=["flash_tutorials"])
66
67
68 def _transform_changelog(path_in: str, path_out: str) -> None:
69 with open(path_in) as fp:
70 chlog_lines = fp.readlines()
71 # enrich short subsub-titles to be unique
72 chlog_ver = ""
73 for i, ln in enumerate(chlog_lines):
74 if ln.startswith("## "):
75 chlog_ver = ln[2:].split("-")[0].strip()
76 elif ln.startswith("### "):
77 ln = ln.replace("###", f"### {chlog_ver} -")
78 chlog_lines[i] = ln
79 with open(path_out, "w") as fp:
80 fp.writelines(chlog_lines)
81
82
83 generated_dir = os.path.join(_PATH_HERE, "generated")
84
85 os.makedirs(generated_dir, exist_ok=True)
86 # copy all documents from GH templates like contribution guide
87 for md in glob.glob(os.path.join(_PATH_ROOT, ".github", "*.md")):
88 shutil.copy(md, os.path.join(generated_dir, os.path.basename(md)))
89 # copy also the changelog
90 _transform_changelog(os.path.join(_PATH_ROOT, "CHANGELOG.md"), os.path.join(generated_dir, "CHANGELOG.md"))
91
92 # -- Generate providers ------------------------------------------------------
93
94 lines = []
95 for provider in providers.PROVIDERS:
96 lines.append(f"- {str(provider)}\n")
97
98 generated_dir = os.path.join("integrations", "generated")
99 os.makedirs(generated_dir, exist_ok=True)
100
101 with open(os.path.join(generated_dir, "providers.rst"), "w") as f:
102 f.writelines(sorted(lines, key=str.casefold))
103
104 # -- General configuration ---------------------------------------------------
105
106 # Add any Sphinx extension module names here, as strings. They can be
107 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
108 # ones.
109 extensions = [
110 "sphinx.ext.autodoc",
111 "sphinx.ext.doctest",
112 "sphinx.ext.intersphinx",
113 "sphinx.ext.todo",
114 "sphinx.ext.viewcode",
115 "sphinx.ext.autosummary",
116 "sphinx.ext.napoleon",
117 "sphinx.ext.imgmath",
118 "myst_parser",
119 "nbsphinx",
120 "sphinx_autodoc_typehints",
121 "sphinx_copybutton",
122 "sphinx_paramlinks",
123 "sphinx_togglebutton",
124 "pt_lightning_sphinx_theme.extensions.lightning_tutorials",
125 ]
126
127 # autodoc: Default to members and undoc-members
128 autodoc_default_options = {"members": True}
129
130 # autodoc: Don't inherit docstrings (e.g. for nn.Module.forward)
131 autodoc_inherit_docstrings = False
132
133 # Add any paths that contain templates here, relative to this directory.
134 templates_path = ["_templates"]
135
136 # https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)
137 # https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document
138 # I execute the notebooks manually in advance. If notebooks test the code,
139 # they should be run at build time.
140 nbsphinx_execute = "never"
141 nbsphinx_allow_errors = True
142 nbsphinx_requirejs_path = ""
143
144 # List of patterns, relative to source directory, that match files and
145 # directories to ignore when looking for source files.
146 # This pattern also affects html_static_path and html_extra_path.
147 exclude_patterns = ["generated/PULL_REQUEST_TEMPLATE.md"]
148
149 # myst-parser, forcing to parse all html pages with mathjax
150 # https://github.com/executablebooks/MyST-Parser/issues/394
151 myst_update_mathjax = False
152
153 # The suffix(es) of source filenames.
154 # You can specify multiple suffix as a list of string:
155 #
156 source_parsers = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown", ".ipynb": "nbsphinx"}
157
158 # The master toctree document.
159 master_doc = "index"
160
161 needs_sphinx = "4.0"
162
163 # -- Options for intersphinx extension ---------------------------------------
164
165 # Example configuration for intersphinx: refer to the Python standard library.
166 intersphinx_mapping = {
167 "python": ("https://docs.python.org/3", None),
168 "torch": ("https://pytorch.org/docs/stable/", None),
169 "numpy": ("https://numpy.org/doc/stable/", None),
170 "PIL": ("https://pillow.readthedocs.io/en/stable/", None),
171 "pytorchvideo": ("https://pytorchvideo.readthedocs.io/en/latest/", None),
172 "pytorch_lightning": ("https://pytorch-lightning.readthedocs.io/en/stable/", None),
173 "fiftyone": ("https://voxel51.com/docs/fiftyone/", "fiftyone_objects.inv"),
174 }
175
176 # -- Options for HTML output -------------------------------------------------
177
178 # The theme to use for HTML and HTML Help pages. See the documentation for
179 # a list of builtin themes.
180 #
181 html_theme = "pt_lightning_sphinx_theme"
182 html_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]
183
184 # Theme options are theme-specific and customize the look and feel of a theme
185 # further. For a list of options available for each theme, see the
186 # documentation.
187
188 html_theme_options = {
189 "pytorch_project": "https://pytorchlightning.ai",
190 "canonical_url": about.__docs_url__,
191 "collapse_navigation": False,
192 "display_version": True,
193 "logo_only": False,
194 }
195
196 # Add any paths that contain custom static files (such as style sheets) here,
197 # relative to this directory. They are copied after the builtin static files,
198 # so a file named "default.css" will overwrite the builtin "default.css".
199 html_static_path = ["_static"]
200
201 html_css_files = []
202
203
204 def setup(app):
205 # this is for hiding doctest decoration,
206 # see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/
207 app.add_js_file("copybutton.js")
208 app.add_css_file("main.css")
209
210
211 # Ignoring Third-party packages
212 # https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule
213 def _package_list_from_file(pfile):
214 assert os.path.isfile(pfile)
215 with open(pfile) as fp:
216 lines = fp.readlines()
217 list_pkgs = []
218 for ln in lines:
219 found = [ln.index(ch) for ch in list(",=<>#@") if ch in ln]
220 pkg = ln[: min(found)] if found else ln
221 if pkg.strip():
222 list_pkgs.append(pkg.strip())
223 return list_pkgs
224
225
226 # define mapping from PyPI names to python imports
227 PACKAGE_MAPPING = {
228 "pytorch-lightning": "pytorch_lightning",
229 "scikit-learn": "sklearn",
230 "Pillow": "PIL",
231 "PyYAML": "yaml",
232 "rouge-score": "rouge_score",
233 "lightning-bolts": "pl_bolts",
234 "pytorch-tabnet": "pytorch_tabnet",
235 "pyDeprecate": "deprecate",
236 }
237 MOCK_PACKAGES = ["numpy", "PyYAML", "tqdm"]
238 if SPHINX_MOCK_REQUIREMENTS:
239 # mock also base packages when we are on RTD since we don't install them there
240 MOCK_PACKAGES += _package_list_from_file(os.path.join(_PATH_ROOT, "requirements.txt"))
241 # replace PyPI packages by importing ones
242 MOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]
243
244 autodoc_mock_imports = MOCK_PACKAGES
245
246 # only run doctests marked with a ".. doctest::" directive
247 doctest_test_doctest_blocks = ""
248 doctest_global_setup = """
249 import torch
250 import pytorch_lightning as pl
251 import flash
252 """
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -28,7 +28,7 @@
_SHOULD_COPY_NOTEBOOKS = True
try:
- from helpers import HelperCLI
+ from assistant import AssistantCLI
except Exception:
_SHOULD_COPY_NOTEBOOKS = False
warnings.warn("To build the code, please run: `git submodule update --init --recursive`", stacklevel=2)
@@ -62,7 +62,7 @@
# -- Project documents -------------------------------------------------------
if _SHOULD_COPY_NOTEBOOKS:
- HelperCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, "notebooks", patterns=["flash_tutorials"])
+ AssistantCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, "notebooks", patterns=["flash_tutorials"])
def _transform_changelog(path_in: str, path_out: str) -> None:
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -28,7 +28,7 @@\n _SHOULD_COPY_NOTEBOOKS = True\n \n try:\n- from helpers import HelperCLI\n+ from assistant import AssistantCLI\n except Exception:\n _SHOULD_COPY_NOTEBOOKS = False\n warnings.warn(\"To build the code, please run: `git submodule update --init --recursive`\", stacklevel=2)\n@@ -62,7 +62,7 @@\n \n # -- Project documents -------------------------------------------------------\n if _SHOULD_COPY_NOTEBOOKS:\n- HelperCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, \"notebooks\", patterns=[\"flash_tutorials\"])\n+ AssistantCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, \"notebooks\", patterns=[\"flash_tutorials\"])\n \n \n def _transform_changelog(path_in: str, path_out: str) -> None:\n", "issue": "TabularForecastingData must include predict_data_frame\n## \ud83d\udcda Documentation\r\n\r\nThe [NBEATS tutorial](https://lightning-flash.readthedocs.io/en/stable/notebooks/flash_tutorials/electricity_forecasting.html) uses `0.5.x`, which references `model.predict()` at the end to generate predictions. In `0.6.0`, `model.predict()` was deprecated in favor of `trainer.predict(model, datamodule=datamodule)`.\r\n\r\nIf you try to do this by passing the `datamodule` created via:\r\n\r\n```python\r\ndatamodule = TabularForecastingData.from_data_frame(\r\n time_idx=\"time_idx\",\r\n target=\"price actual\",\r\n group_ids=[\"constant\"],\r\n max_encoder_length=max_encoder_length,\r\n max_prediction_length=max_prediction_length,\r\n time_varying_unknown_reals=[\"price actual\"],\r\n train_data_frame=df_energy_daily[df_energy_daily[\"time_idx\"] <= training_cutoff],\r\n val_data_frame=df_energy_daily,\r\n batch_size=256,\r\n)\r\n```\r\n\r\nto `trainer.predict(...)`, you'll get the following error:\r\n\r\n```python\r\nMisconfigurationException: No `predict_dataloader()` method defined to run `Trainer.predict`.\r\n```\r\n\r\nThe solution is to be found [here](https://lightning-flash.readthedocs.io/en/stable/reference/tabular_forecasting.html), which clearly shows how to make the prediction datamodule:\r\n\r\n```python\r\n# 4. Generate predictions\r\ndatamodule = TabularForecastingData.from_data_frame(predict_data_frame=data, parameters=datamodule.parameters)\r\npredictions = trainer.predict(model, datamodule=datamodule)\r\n```\r\n\r\n__Suggestion:__\r\n* Update the tutorial to use `0.6.0`\r\n* Add a small section to the [API docs](https://lightning-flash.readthedocs.io/en/stable/api/generated/flash.tabular.forecasting.data.TabularForecastingData.html#flash.tabular.forecasting.data.TabularForecastingData.from_data_frame) explaining that `predict_data_frame` must be specified in order to make use of `trainer.predict(...)` \n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport glob\nimport os\nimport shutil\nimport sys\nimport warnings\nfrom importlib.util import module_from_spec, spec_from_file_location\n\nimport pt_lightning_sphinx_theme\n\n_PATH_HERE = os.path.abspath(os.path.dirname(__file__))\n_PATH_ROOT = os.path.join(_PATH_HERE, \"..\", \"..\")\n_PATH_RAW_NB = os.path.join(_PATH_ROOT, \"_notebooks\")\nsys.path.insert(0, os.path.abspath(_PATH_ROOT))\nsys.path.append(os.path.join(_PATH_RAW_NB, \".actions\"))\n\n_SHOULD_COPY_NOTEBOOKS = True\n\ntry:\n from helpers import HelperCLI\nexcept Exception:\n _SHOULD_COPY_NOTEBOOKS = False\n warnings.warn(\"To build the code, please run: `git submodule update --init --recursive`\", stacklevel=2)\n\n\ndef _load_py_module(fname, pkg=\"flash\"):\n spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_ROOT, pkg, fname))\n py = module_from_spec(spec)\n spec.loader.exec_module(py)\n return py\n\n\ntry:\n from flash import __about__ as about\n from flash.core.utilities import providers\n\nexcept ModuleNotFoundError:\n\n about = _load_py_module(\"__about__.py\")\n providers = _load_py_module(\"core/utilities/providers.py\")\n\nSPHINX_MOCK_REQUIREMENTS = int(os.environ.get(\"SPHINX_MOCK_REQUIREMENTS\", True))\n\nhtml_favicon = \"_static/images/icon.svg\"\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Flash\"\ncopyright = \"2020-2021, PyTorch Lightning\"\nauthor = \"PyTorch Lightning\"\n\n# -- Project documents -------------------------------------------------------\nif _SHOULD_COPY_NOTEBOOKS:\n HelperCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, \"notebooks\", patterns=[\"flash_tutorials\"])\n\n\ndef _transform_changelog(path_in: str, path_out: str) -> None:\n with open(path_in) as fp:\n chlog_lines = fp.readlines()\n # enrich short subsub-titles to be unique\n chlog_ver = \"\"\n for i, ln in enumerate(chlog_lines):\n if ln.startswith(\"## \"):\n chlog_ver = ln[2:].split(\"-\")[0].strip()\n elif ln.startswith(\"### \"):\n ln = ln.replace(\"###\", f\"### {chlog_ver} -\")\n chlog_lines[i] = ln\n with open(path_out, \"w\") as fp:\n fp.writelines(chlog_lines)\n\n\ngenerated_dir = os.path.join(_PATH_HERE, \"generated\")\n\nos.makedirs(generated_dir, exist_ok=True)\n# copy all documents from GH templates like contribution guide\nfor md in glob.glob(os.path.join(_PATH_ROOT, \".github\", \"*.md\")):\n shutil.copy(md, os.path.join(generated_dir, os.path.basename(md)))\n# copy also the changelog\n_transform_changelog(os.path.join(_PATH_ROOT, \"CHANGELOG.md\"), os.path.join(generated_dir, \"CHANGELOG.md\"))\n\n# -- Generate providers ------------------------------------------------------\n\nlines = []\nfor provider in providers.PROVIDERS:\n lines.append(f\"- {str(provider)}\\n\")\n\ngenerated_dir = os.path.join(\"integrations\", \"generated\")\nos.makedirs(generated_dir, exist_ok=True)\n\nwith open(os.path.join(generated_dir, \"providers.rst\"), \"w\") as f:\n f.writelines(sorted(lines, key=str.casefold))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.imgmath\",\n \"myst_parser\",\n \"nbsphinx\",\n \"sphinx_autodoc_typehints\",\n \"sphinx_copybutton\",\n \"sphinx_paramlinks\",\n \"sphinx_togglebutton\",\n \"pt_lightning_sphinx_theme.extensions.lightning_tutorials\",\n]\n\n# autodoc: Default to members and undoc-members\nautodoc_default_options = {\"members\": True}\n\n# autodoc: Don't inherit docstrings (e.g. for nn.Module.forward)\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)\n# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document\n# I execute the notebooks manually in advance. If notebooks test the code,\n# they should be run at build time.\nnbsphinx_execute = \"never\"\nnbsphinx_allow_errors = True\nnbsphinx_requirejs_path = \"\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"generated/PULL_REQUEST_TEMPLATE.md\"]\n\n# myst-parser, forcing to parse all html pages with mathjax\n# https://github.com/executablebooks/MyST-Parser/issues/394\nmyst_update_mathjax = False\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_parsers = {\".rst\": \"restructuredtext\", \".txt\": \"markdown\", \".md\": \"markdown\", \".ipynb\": \"nbsphinx\"}\n\n# The master toctree document.\nmaster_doc = \"index\"\n\nneeds_sphinx = \"4.0\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"pytorchvideo\": (\"https://pytorchvideo.readthedocs.io/en/latest/\", None),\n \"pytorch_lightning\": (\"https://pytorch-lightning.readthedocs.io/en/stable/\", None),\n \"fiftyone\": (\"https://voxel51.com/docs/fiftyone/\", \"fiftyone_objects.inv\"),\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pt_lightning_sphinx_theme\"\nhtml_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"pytorch_project\": \"https://pytorchlightning.ai\",\n \"canonical_url\": about.__docs_url__,\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_css_files = []\n\n\ndef setup(app):\n # this is for hiding doctest decoration,\n # see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/\n app.add_js_file(\"copybutton.js\")\n app.add_css_file(\"main.css\")\n\n\n# Ignoring Third-party packages\n# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule\ndef _package_list_from_file(pfile):\n assert os.path.isfile(pfile)\n with open(pfile) as fp:\n lines = fp.readlines()\n list_pkgs = []\n for ln in lines:\n found = [ln.index(ch) for ch in list(\",=<>#@\") if ch in ln]\n pkg = ln[: min(found)] if found else ln\n if pkg.strip():\n list_pkgs.append(pkg.strip())\n return list_pkgs\n\n\n# define mapping from PyPI names to python imports\nPACKAGE_MAPPING = {\n \"pytorch-lightning\": \"pytorch_lightning\",\n \"scikit-learn\": \"sklearn\",\n \"Pillow\": \"PIL\",\n \"PyYAML\": \"yaml\",\n \"rouge-score\": \"rouge_score\",\n \"lightning-bolts\": \"pl_bolts\",\n \"pytorch-tabnet\": \"pytorch_tabnet\",\n \"pyDeprecate\": \"deprecate\",\n}\nMOCK_PACKAGES = [\"numpy\", \"PyYAML\", \"tqdm\"]\nif SPHINX_MOCK_REQUIREMENTS:\n # mock also base packages when we are on RTD since we don't install them there\n MOCK_PACKAGES += _package_list_from_file(os.path.join(_PATH_ROOT, \"requirements.txt\"))\n# replace PyPI packages by importing ones\nMOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]\n\nautodoc_mock_imports = MOCK_PACKAGES\n\n# only run doctests marked with a \".. doctest::\" directive\ndoctest_test_doctest_blocks = \"\"\ndoctest_global_setup = \"\"\"\nimport torch\nimport pytorch_lightning as pl\nimport flash\n\"\"\"\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport glob\nimport os\nimport shutil\nimport sys\nimport warnings\nfrom importlib.util import module_from_spec, spec_from_file_location\n\nimport pt_lightning_sphinx_theme\n\n_PATH_HERE = os.path.abspath(os.path.dirname(__file__))\n_PATH_ROOT = os.path.join(_PATH_HERE, \"..\", \"..\")\n_PATH_RAW_NB = os.path.join(_PATH_ROOT, \"_notebooks\")\nsys.path.insert(0, os.path.abspath(_PATH_ROOT))\nsys.path.append(os.path.join(_PATH_RAW_NB, \".actions\"))\n\n_SHOULD_COPY_NOTEBOOKS = True\n\ntry:\n from assistant import AssistantCLI\nexcept Exception:\n _SHOULD_COPY_NOTEBOOKS = False\n warnings.warn(\"To build the code, please run: `git submodule update --init --recursive`\", stacklevel=2)\n\n\ndef _load_py_module(fname, pkg=\"flash\"):\n spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_ROOT, pkg, fname))\n py = module_from_spec(spec)\n spec.loader.exec_module(py)\n return py\n\n\ntry:\n from flash import __about__ as about\n from flash.core.utilities import providers\n\nexcept ModuleNotFoundError:\n\n about = _load_py_module(\"__about__.py\")\n providers = _load_py_module(\"core/utilities/providers.py\")\n\nSPHINX_MOCK_REQUIREMENTS = int(os.environ.get(\"SPHINX_MOCK_REQUIREMENTS\", True))\n\nhtml_favicon = \"_static/images/icon.svg\"\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Flash\"\ncopyright = \"2020-2021, PyTorch Lightning\"\nauthor = \"PyTorch Lightning\"\n\n# -- Project documents -------------------------------------------------------\nif _SHOULD_COPY_NOTEBOOKS:\n AssistantCLI.copy_notebooks(_PATH_RAW_NB, _PATH_HERE, \"notebooks\", patterns=[\"flash_tutorials\"])\n\n\ndef _transform_changelog(path_in: str, path_out: str) -> None:\n with open(path_in) as fp:\n chlog_lines = fp.readlines()\n # enrich short subsub-titles to be unique\n chlog_ver = \"\"\n for i, ln in enumerate(chlog_lines):\n if ln.startswith(\"## \"):\n chlog_ver = ln[2:].split(\"-\")[0].strip()\n elif ln.startswith(\"### \"):\n ln = ln.replace(\"###\", f\"### {chlog_ver} -\")\n chlog_lines[i] = ln\n with open(path_out, \"w\") as fp:\n fp.writelines(chlog_lines)\n\n\ngenerated_dir = os.path.join(_PATH_HERE, \"generated\")\n\nos.makedirs(generated_dir, exist_ok=True)\n# copy all documents from GH templates like contribution guide\nfor md in glob.glob(os.path.join(_PATH_ROOT, \".github\", \"*.md\")):\n shutil.copy(md, os.path.join(generated_dir, os.path.basename(md)))\n# copy also the changelog\n_transform_changelog(os.path.join(_PATH_ROOT, \"CHANGELOG.md\"), os.path.join(generated_dir, \"CHANGELOG.md\"))\n\n# -- Generate providers ------------------------------------------------------\n\nlines = []\nfor provider in providers.PROVIDERS:\n lines.append(f\"- {str(provider)}\\n\")\n\ngenerated_dir = os.path.join(\"integrations\", \"generated\")\nos.makedirs(generated_dir, exist_ok=True)\n\nwith open(os.path.join(generated_dir, \"providers.rst\"), \"w\") as f:\n f.writelines(sorted(lines, key=str.casefold))\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.imgmath\",\n \"myst_parser\",\n \"nbsphinx\",\n \"sphinx_autodoc_typehints\",\n \"sphinx_copybutton\",\n \"sphinx_paramlinks\",\n \"sphinx_togglebutton\",\n \"pt_lightning_sphinx_theme.extensions.lightning_tutorials\",\n]\n\n# autodoc: Default to members and undoc-members\nautodoc_default_options = {\"members\": True}\n\n# autodoc: Don't inherit docstrings (e.g. for nn.Module.forward)\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# https://berkeley-stat159-f17.github.io/stat159-f17/lectures/14-sphinx..html#conf.py-(cont.)\n# https://stackoverflow.com/questions/38526888/embed-ipython-notebook-in-sphinx-document\n# I execute the notebooks manually in advance. If notebooks test the code,\n# they should be run at build time.\nnbsphinx_execute = \"never\"\nnbsphinx_allow_errors = True\nnbsphinx_requirejs_path = \"\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"generated/PULL_REQUEST_TEMPLATE.md\"]\n\n# myst-parser, forcing to parse all html pages with mathjax\n# https://github.com/executablebooks/MyST-Parser/issues/394\nmyst_update_mathjax = False\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_parsers = {\".rst\": \"restructuredtext\", \".txt\": \"markdown\", \".md\": \"markdown\", \".ipynb\": \"nbsphinx\"}\n\n# The master toctree document.\nmaster_doc = \"index\"\n\nneeds_sphinx = \"4.0\"\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"pytorchvideo\": (\"https://pytorchvideo.readthedocs.io/en/latest/\", None),\n \"pytorch_lightning\": (\"https://pytorch-lightning.readthedocs.io/en/stable/\", None),\n \"fiftyone\": (\"https://voxel51.com/docs/fiftyone/\", \"fiftyone_objects.inv\"),\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pt_lightning_sphinx_theme\"\nhtml_theme_path = [pt_lightning_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"pytorch_project\": \"https://pytorchlightning.ai\",\n \"canonical_url\": about.__docs_url__,\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_css_files = []\n\n\ndef setup(app):\n # this is for hiding doctest decoration,\n # see: http://z4r.github.io/python/2011/12/02/hides-the-prompts-and-output/\n app.add_js_file(\"copybutton.js\")\n app.add_css_file(\"main.css\")\n\n\n# Ignoring Third-party packages\n# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule\ndef _package_list_from_file(pfile):\n assert os.path.isfile(pfile)\n with open(pfile) as fp:\n lines = fp.readlines()\n list_pkgs = []\n for ln in lines:\n found = [ln.index(ch) for ch in list(\",=<>#@\") if ch in ln]\n pkg = ln[: min(found)] if found else ln\n if pkg.strip():\n list_pkgs.append(pkg.strip())\n return list_pkgs\n\n\n# define mapping from PyPI names to python imports\nPACKAGE_MAPPING = {\n \"pytorch-lightning\": \"pytorch_lightning\",\n \"scikit-learn\": \"sklearn\",\n \"Pillow\": \"PIL\",\n \"PyYAML\": \"yaml\",\n \"rouge-score\": \"rouge_score\",\n \"lightning-bolts\": \"pl_bolts\",\n \"pytorch-tabnet\": \"pytorch_tabnet\",\n \"pyDeprecate\": \"deprecate\",\n}\nMOCK_PACKAGES = [\"numpy\", \"PyYAML\", \"tqdm\"]\nif SPHINX_MOCK_REQUIREMENTS:\n # mock also base packages when we are on RTD since we don't install them there\n MOCK_PACKAGES += _package_list_from_file(os.path.join(_PATH_ROOT, \"requirements.txt\"))\n# replace PyPI packages by importing ones\nMOCK_PACKAGES = [PACKAGE_MAPPING.get(pkg, pkg) for pkg in MOCK_PACKAGES]\n\nautodoc_mock_imports = MOCK_PACKAGES\n\n# only run doctests marked with a \".. doctest::\" directive\ndoctest_test_doctest_blocks = \"\"\ndoctest_global_setup = \"\"\"\nimport torch\nimport pytorch_lightning as pl\nimport flash\n\"\"\"\n", "path": "docs/source/conf.py"}]} | 3,585 | 208 |
gh_patches_debug_33159 | rasdani/github-patches | git_diff | kubeflow__pipelines-6054 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[v2compat] re-evaluate execution custom properties schema
* [ ] rename task execution `task_name` to `display_name`?
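
For context, a rough sketch of where this property surfaces, using the `ml_metadata` Python client directly (the connection details are assumptions for a typical KFP install, and `display_name` is the proposed name, not an existing property):

```python
from ml_metadata import metadata_store
from ml_metadata.proto import metadata_store_pb2

config = metadata_store_pb2.MetadataStoreClientConfig(
    host="metadata-grpc-service.kubeflow", port=8080  # assumed in-cluster address
)
store = metadata_store.MetadataStore(config)

for execution in store.get_executions():
    props = execution.custom_properties
    # The v2-compatible launcher currently records the task's name as "task_name";
    # the question above is whether to expose it as "display_name" instead.
    current = props["task_name"].string_value if "task_name" in props else None
    print(execution.id, current)
```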
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/compiler/_default_transformers.py`
Content:
```
1 # Copyright 2019 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import warnings
16 from kubernetes import client as k8s_client
17 from typing import Callable, Dict, Optional, Text
18 from kfp.dsl._container_op import BaseOp, ContainerOp
19
20 def add_pod_env(op: BaseOp) -> BaseOp:
21 """Adds environment info if the Pod has the label `add-pod-env = true`.
22 """
23 if isinstance(
24 op, ContainerOp
25 ) and op.pod_labels and 'add-pod-env' in op.pod_labels and op.pod_labels[
26 'add-pod-env'] == 'true':
27 return add_kfp_pod_env(op)
28
29
30 def add_kfp_pod_env(op: BaseOp) -> BaseOp:
31 """Adds KFP pod environment info to the specified ContainerOp.
32 """
33 if not isinstance(op, ContainerOp):
34 warnings.warn(
35 'Trying to add default KFP environment variables to an Op that is '
36 'not a ContainerOp. Ignoring request.')
37 return op
38
39 op.container.add_env_variable(
40 k8s_client.V1EnvVar(name='KFP_POD_NAME',
41 value_from=k8s_client.V1EnvVarSource(
42 field_ref=k8s_client.V1ObjectFieldSelector(
43 field_path='metadata.name')))
44 ).add_env_variable(
45 k8s_client.V1EnvVar(name='KFP_NAMESPACE',
46 value_from=k8s_client.V1EnvVarSource(
47 field_ref=k8s_client.V1ObjectFieldSelector(
48 field_path='metadata.namespace')))
49 ).add_env_variable(
50 k8s_client.V1EnvVar(
51 name='WORKFLOW_ID',
52 value_from=k8s_client.
53 V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(
54 field_path="metadata.labels['workflows.argoproj.io/workflow']")))
55 ).add_env_variable(
56 k8s_client.V1EnvVar(
57 name='ENABLE_CACHING',
58 value_from=k8s_client.
59 V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(
60 field_path="metadata.labels['pipelines.kubeflow.org/enable_caching']")))
61 )
62 return op
63
64
65 def add_pod_labels(labels: Optional[Dict] = None) -> Callable:
66 """Adds provided pod labels to each pod."""
67
68 def _add_pod_labels(task):
69 for k, v in labels.items():
70 # Only append but not update.
71 # This is needed to bypass TFX pipelines/components.
72 if k not in task.pod_labels:
73 task.add_pod_label(k, v)
74 return task
75
76 return _add_pod_labels
77
```
Path: `sdk/python/kfp/compiler/v2_compat.py`
Content:
```
1 # Copyright 2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Utility functions for enabling v2-compatible pipelines in v1."""
15 import collections
16 import json
17 from typing import Optional
18
19 from kfp import dsl
20 from kfp.compiler import _default_transformers
21 from kfp.pipeline_spec import pipeline_spec_pb2
22 from kfp.v2 import compiler
23
24 from kubernetes import client as k8s_client
25
26 _DEFAULT_LAUNCHER_IMAGE = "gcr.io/ml-pipeline/kfp-launcher:1.6.4"
27
28
29 def update_op(op: dsl.ContainerOp,
30 pipeline_name: dsl.PipelineParam,
31 pipeline_root: dsl.PipelineParam,
32 launcher_image: Optional[str] = None) -> None:
33 """Updates the passed in Op for running in v2-compatible mode.
34
35 Args:
36 op: The Op to update.
37 pipeline_spec: The PipelineSpec for the pipeline under which `op`
38 runs.
39 pipeline_root: The root output directory for pipeline artifacts.
40 launcher_image: An optional launcher image. Useful for tests.
41 """
42 op.is_v2 = True
43 # Inject the launcher binary and overwrite the entrypoint.
44 image_name = launcher_image or _DEFAULT_LAUNCHER_IMAGE
45 launcher_container = dsl.UserContainer(name="kfp-launcher",
46 image=image_name,
47 command="/bin/mount_launcher.sh",
48 mirror_volume_mounts=True)
49
50 op.add_init_container(launcher_container)
51 op.add_volume(k8s_client.V1Volume(name='kfp-launcher'))
52 op.add_volume_mount(
53 k8s_client.V1VolumeMount(name='kfp-launcher', mount_path='/kfp-launcher'))
54
55 # op.command + op.args will have the following sections:
56 # 1. args passed to kfp-launcher
57 # 2. a separator "--"
58 # 3. parameters in format "key1=value1", "key2=value2", ...
59 # 4. a separator "--" as end of arguments passed to launcher
60 # 5. (start of op.args) arguments of the original user program command + args
61 #
62 # example:
63 # - command:
64 # - /kfp-launcher/launch
65 # - '--mlmd_server_address'
66 # - $(METADATA_GRPC_SERVICE_HOST)
67 # - '--mlmd_server_port'
68 # - $(METADATA_GRPC_SERVICE_PORT)
69 # - ... # more launcher params
70 # - '--pipeline_task_id'
71 # - $(KFP_POD_NAME)
72 # - '--pipeline_root'
73 # - ''
74 # - '--' # start of parameter values
75 # - first=first
76 # - second=second
77 # - '--' # start of user command and args
78 # args:
79 # - sh
80 # - '-ec'
81 # - |
82 # program_path=$(mktemp)
83 # printf "%s" "$0" > "$program_path"
84 # python3 -u "$program_path" "$@"
85 # - >
86 # import json
87 # import xxx
88 # ...
89 op.command = [
90 "/kfp-launcher/launch",
91 "--mlmd_server_address",
92 "$(METADATA_GRPC_SERVICE_HOST)",
93 "--mlmd_server_port",
94 "$(METADATA_GRPC_SERVICE_PORT)",
95 "--runtime_info_json",
96 "$(KFP_V2_RUNTIME_INFO)",
97 "--container_image",
98 "$(KFP_V2_IMAGE)",
99 "--task_name",
100 op.name,
101 "--pipeline_name",
102 pipeline_name,
103 "--pipeline_run_id",
104 "$(WORKFLOW_ID)",
105 "--pipeline_task_id",
106 "$(KFP_POD_NAME)",
107 "--pipeline_root",
108 pipeline_root,
109 "--enable_caching",
110 "$(ENABLE_CACHING)",
111 ]
112
113 # Mount necessary environment variables.
114 op.apply(_default_transformers.add_kfp_pod_env)
115 op.container.add_env_variable(
116 k8s_client.V1EnvVar(name="KFP_V2_IMAGE", value=op.container.image))
117
118 config_map_ref = k8s_client.V1ConfigMapEnvSource(
119 name='metadata-grpc-configmap', optional=True)
120 op.container.add_env_from(
121 k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))
122
123 op.arguments = list(op.container_spec.command) + list(op.container_spec.args)
124
125 runtime_info = {
126 "inputParameters": collections.OrderedDict(),
127 "inputArtifacts": collections.OrderedDict(),
128 "outputParameters": collections.OrderedDict(),
129 "outputArtifacts": collections.OrderedDict(),
130 }
131
132 op.command += ["--"]
133 component_spec = op.component_spec
134 for parameter, spec in sorted(
135 component_spec.input_definitions.parameters.items()):
136 parameter_info = {
137 "type":
138 pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
139 }
140 op.command += [f"{parameter}={op._parameter_arguments[parameter]}"]
141 runtime_info["inputParameters"][parameter] = parameter_info
142 op.command += ["--"]
143
144 for artifact_name, spec in sorted(
145 component_spec.input_definitions.artifacts.items()):
146 artifact_info = {
147 "metadataPath": op.input_artifact_paths[artifact_name],
148 "schemaTitle": spec.artifact_type.schema_title,
149 "instanceSchema": spec.artifact_type.instance_schema,
150 }
151 runtime_info["inputArtifacts"][artifact_name] = artifact_info
152
153 for parameter, spec in sorted(
154 component_spec.output_definitions.parameters.items()):
155 parameter_info = {
156 "type":
157 pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),
158 "path":
159 op.file_outputs[parameter],
160 }
161 runtime_info["outputParameters"][parameter] = parameter_info
162
163 for artifact_name, spec in sorted(
164 component_spec.output_definitions.artifacts.items()):
165 # TODO: Assert instance_schema.
166 artifact_info = {
167 # Type used to register output artifacts.
168 "schemaTitle": spec.artifact_type.schema_title,
169 "instanceSchema": spec.artifact_type.instance_schema,
170 # File used to write out the registered artifact ID.
171 "metadataPath": op.file_outputs[artifact_name],
172 }
173 runtime_info["outputArtifacts"][artifact_name] = artifact_info
174
175 op.container.add_env_variable(
176 k8s_client.V1EnvVar(name="KFP_V2_RUNTIME_INFO",
177 value=json.dumps(runtime_info)))
178
179 op.pod_annotations['pipelines.kubeflow.org/v2_component'] = "true"
180 op.pod_labels['pipelines.kubeflow.org/v2_component']= "true"
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/kfp/compiler/_default_transformers.py b/sdk/python/kfp/compiler/_default_transformers.py
--- a/sdk/python/kfp/compiler/_default_transformers.py
+++ b/sdk/python/kfp/compiler/_default_transformers.py
@@ -41,6 +41,11 @@
value_from=k8s_client.V1EnvVarSource(
field_ref=k8s_client.V1ObjectFieldSelector(
field_path='metadata.name')))
+ ).add_env_variable(
+ k8s_client.V1EnvVar(name='KFP_POD_UID',
+ value_from=k8s_client.V1EnvVarSource(
+ field_ref=k8s_client.V1ObjectFieldSelector(
+ field_path='metadata.uid')))
).add_env_variable(
k8s_client.V1EnvVar(name='KFP_NAMESPACE',
value_from=k8s_client.V1EnvVarSource(
@@ -52,6 +57,12 @@
value_from=k8s_client.
V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(
field_path="metadata.labels['workflows.argoproj.io/workflow']")))
+ ).add_env_variable(
+ k8s_client.V1EnvVar(
+ name='KFP_RUN_ID',
+ value_from=k8s_client.
+ V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(
+ field_path="metadata.labels['pipeline/runid']")))
).add_env_variable(
k8s_client.V1EnvVar(
name='ENABLE_CACHING',
diff --git a/sdk/python/kfp/compiler/v2_compat.py b/sdk/python/kfp/compiler/v2_compat.py
--- a/sdk/python/kfp/compiler/v2_compat.py
+++ b/sdk/python/kfp/compiler/v2_compat.py
@@ -100,10 +100,16 @@
op.name,
"--pipeline_name",
pipeline_name,
- "--pipeline_run_id",
- "$(WORKFLOW_ID)",
- "--pipeline_task_id",
+ "--run_id",
+ "$(KFP_RUN_ID)",
+ "--run_resource",
+ "workflows.argoproj.io/$(WORKFLOW_ID)",
+ "--namespace",
+ "$(KFP_NAMESPACE)",
+ "--pod_name",
"$(KFP_POD_NAME)",
+ "--pod_uid",
+ "$(KFP_POD_UID)",
"--pipeline_root",
pipeline_root,
"--enable_caching",
| {"golden_diff": "diff --git a/sdk/python/kfp/compiler/_default_transformers.py b/sdk/python/kfp/compiler/_default_transformers.py\n--- a/sdk/python/kfp/compiler/_default_transformers.py\n+++ b/sdk/python/kfp/compiler/_default_transformers.py\n@@ -41,6 +41,11 @@\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.name')))\n+ ).add_env_variable(\n+ k8s_client.V1EnvVar(name='KFP_POD_UID',\n+ value_from=k8s_client.V1EnvVarSource(\n+ field_ref=k8s_client.V1ObjectFieldSelector(\n+ field_path='metadata.uid')))\n ).add_env_variable(\n k8s_client.V1EnvVar(name='KFP_NAMESPACE',\n value_from=k8s_client.V1EnvVarSource(\n@@ -52,6 +57,12 @@\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['workflows.argoproj.io/workflow']\")))\n+ ).add_env_variable(\n+ k8s_client.V1EnvVar(\n+ name='KFP_RUN_ID',\n+ value_from=k8s_client.\n+ V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n+ field_path=\"metadata.labels['pipeline/runid']\")))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='ENABLE_CACHING',\ndiff --git a/sdk/python/kfp/compiler/v2_compat.py b/sdk/python/kfp/compiler/v2_compat.py\n--- a/sdk/python/kfp/compiler/v2_compat.py\n+++ b/sdk/python/kfp/compiler/v2_compat.py\n@@ -100,10 +100,16 @@\n op.name,\n \"--pipeline_name\",\n pipeline_name,\n- \"--pipeline_run_id\",\n- \"$(WORKFLOW_ID)\",\n- \"--pipeline_task_id\",\n+ \"--run_id\",\n+ \"$(KFP_RUN_ID)\",\n+ \"--run_resource\",\n+ \"workflows.argoproj.io/$(WORKFLOW_ID)\",\n+ \"--namespace\",\n+ \"$(KFP_NAMESPACE)\",\n+ \"--pod_name\",\n \"$(KFP_POD_NAME)\",\n+ \"--pod_uid\",\n+ \"$(KFP_POD_UID)\",\n \"--pipeline_root\",\n pipeline_root,\n \"--enable_caching\",\n", "issue": "[v2compat] re-evaluate execution custom properties schema\n* [ ] rename task execution `task_name` to `display_name`?\n", "before_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom kubernetes import client as k8s_client\nfrom typing import Callable, Dict, Optional, Text\nfrom kfp.dsl._container_op import BaseOp, ContainerOp\n\ndef add_pod_env(op: BaseOp) -> BaseOp:\n \"\"\"Adds environment info if the Pod has the label `add-pod-env = true`.\n \"\"\"\n if isinstance(\n op, ContainerOp\n ) and op.pod_labels and 'add-pod-env' in op.pod_labels and op.pod_labels[\n 'add-pod-env'] == 'true':\n return add_kfp_pod_env(op)\n\n\ndef add_kfp_pod_env(op: BaseOp) -> BaseOp:\n \"\"\"Adds KFP pod environment info to the specified ContainerOp.\n \"\"\"\n if not isinstance(op, ContainerOp):\n warnings.warn(\n 'Trying to add default KFP environment variables to an Op that is '\n 'not a ContainerOp. 
Ignoring request.')\n return op\n\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name='KFP_POD_NAME',\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.name')))\n ).add_env_variable(\n k8s_client.V1EnvVar(name='KFP_NAMESPACE',\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.namespace')))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='WORKFLOW_ID',\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['workflows.argoproj.io/workflow']\")))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='ENABLE_CACHING',\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['pipelines.kubeflow.org/enable_caching']\")))\n )\n return op\n\n\ndef add_pod_labels(labels: Optional[Dict] = None) -> Callable:\n \"\"\"Adds provided pod labels to each pod.\"\"\"\n\n def _add_pod_labels(task):\n for k, v in labels.items():\n # Only append but not update.\n # This is needed to bypass TFX pipelines/components.\n if k not in task.pod_labels:\n task.add_pod_label(k, v)\n return task\n\n return _add_pod_labels\n", "path": "sdk/python/kfp/compiler/_default_transformers.py"}, {"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions for enabling v2-compatible pipelines in v1.\"\"\"\nimport collections\nimport json\nfrom typing import Optional\n\nfrom kfp import dsl\nfrom kfp.compiler import _default_transformers\nfrom kfp.pipeline_spec import pipeline_spec_pb2\nfrom kfp.v2 import compiler\n\nfrom kubernetes import client as k8s_client\n\n_DEFAULT_LAUNCHER_IMAGE = \"gcr.io/ml-pipeline/kfp-launcher:1.6.4\"\n\n\ndef update_op(op: dsl.ContainerOp,\n pipeline_name: dsl.PipelineParam,\n pipeline_root: dsl.PipelineParam,\n launcher_image: Optional[str] = None) -> None:\n \"\"\"Updates the passed in Op for running in v2-compatible mode.\n\n Args:\n op: The Op to update.\n pipeline_spec: The PipelineSpec for the pipeline under which `op`\n runs.\n pipeline_root: The root output directory for pipeline artifacts.\n launcher_image: An optional launcher image. Useful for tests.\n \"\"\"\n op.is_v2 = True\n # Inject the launcher binary and overwrite the entrypoint.\n image_name = launcher_image or _DEFAULT_LAUNCHER_IMAGE\n launcher_container = dsl.UserContainer(name=\"kfp-launcher\",\n image=image_name,\n command=\"/bin/mount_launcher.sh\",\n mirror_volume_mounts=True)\n\n op.add_init_container(launcher_container)\n op.add_volume(k8s_client.V1Volume(name='kfp-launcher'))\n op.add_volume_mount(\n k8s_client.V1VolumeMount(name='kfp-launcher', mount_path='/kfp-launcher'))\n\n # op.command + op.args will have the following sections:\n # 1. args passed to kfp-launcher\n # 2. a separator \"--\"\n # 3. parameters in format \"key1=value1\", \"key2=value2\", ...\n # 4. 
a separator \"--\" as end of arguments passed to launcher\n # 5. (start of op.args) arguments of the original user program command + args\n #\n # example:\n # - command:\n # - /kfp-launcher/launch\n # - '--mlmd_server_address'\n # - $(METADATA_GRPC_SERVICE_HOST)\n # - '--mlmd_server_port'\n # - $(METADATA_GRPC_SERVICE_PORT)\n # - ... # more launcher params\n # - '--pipeline_task_id'\n # - $(KFP_POD_NAME)\n # - '--pipeline_root'\n # - ''\n # - '--' # start of parameter values\n # - first=first\n # - second=second\n # - '--' # start of user command and args\n # args:\n # - sh\n # - '-ec'\n # - |\n # program_path=$(mktemp)\n # printf \"%s\" \"$0\" > \"$program_path\"\n # python3 -u \"$program_path\" \"$@\"\n # - >\n # import json\n # import xxx\n # ...\n op.command = [\n \"/kfp-launcher/launch\",\n \"--mlmd_server_address\",\n \"$(METADATA_GRPC_SERVICE_HOST)\",\n \"--mlmd_server_port\",\n \"$(METADATA_GRPC_SERVICE_PORT)\",\n \"--runtime_info_json\",\n \"$(KFP_V2_RUNTIME_INFO)\",\n \"--container_image\",\n \"$(KFP_V2_IMAGE)\",\n \"--task_name\",\n op.name,\n \"--pipeline_name\",\n pipeline_name,\n \"--pipeline_run_id\",\n \"$(WORKFLOW_ID)\",\n \"--pipeline_task_id\",\n \"$(KFP_POD_NAME)\",\n \"--pipeline_root\",\n pipeline_root,\n \"--enable_caching\",\n \"$(ENABLE_CACHING)\",\n ]\n\n # Mount necessary environment variables.\n op.apply(_default_transformers.add_kfp_pod_env)\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name=\"KFP_V2_IMAGE\", value=op.container.image))\n\n config_map_ref = k8s_client.V1ConfigMapEnvSource(\n name='metadata-grpc-configmap', optional=True)\n op.container.add_env_from(\n k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))\n\n op.arguments = list(op.container_spec.command) + list(op.container_spec.args)\n\n runtime_info = {\n \"inputParameters\": collections.OrderedDict(),\n \"inputArtifacts\": collections.OrderedDict(),\n \"outputParameters\": collections.OrderedDict(),\n \"outputArtifacts\": collections.OrderedDict(),\n }\n\n op.command += [\"--\"]\n component_spec = op.component_spec\n for parameter, spec in sorted(\n component_spec.input_definitions.parameters.items()):\n parameter_info = {\n \"type\":\n pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),\n }\n op.command += [f\"{parameter}={op._parameter_arguments[parameter]}\"]\n runtime_info[\"inputParameters\"][parameter] = parameter_info\n op.command += [\"--\"]\n\n for artifact_name, spec in sorted(\n component_spec.input_definitions.artifacts.items()):\n artifact_info = {\n \"metadataPath\": op.input_artifact_paths[artifact_name],\n \"schemaTitle\": spec.artifact_type.schema_title,\n \"instanceSchema\": spec.artifact_type.instance_schema,\n }\n runtime_info[\"inputArtifacts\"][artifact_name] = artifact_info\n\n for parameter, spec in sorted(\n component_spec.output_definitions.parameters.items()):\n parameter_info = {\n \"type\":\n pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),\n \"path\":\n op.file_outputs[parameter],\n }\n runtime_info[\"outputParameters\"][parameter] = parameter_info\n\n for artifact_name, spec in sorted(\n component_spec.output_definitions.artifacts.items()):\n # TODO: Assert instance_schema.\n artifact_info = {\n # Type used to register output artifacts.\n \"schemaTitle\": spec.artifact_type.schema_title,\n \"instanceSchema\": spec.artifact_type.instance_schema,\n # File used to write out the registered artifact ID.\n \"metadataPath\": op.file_outputs[artifact_name],\n }\n runtime_info[\"outputArtifacts\"][artifact_name] = 
artifact_info\n\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name=\"KFP_V2_RUNTIME_INFO\",\n value=json.dumps(runtime_info)))\n\n op.pod_annotations['pipelines.kubeflow.org/v2_component'] = \"true\"\n op.pod_labels['pipelines.kubeflow.org/v2_component']= \"true\"\n", "path": "sdk/python/kfp/compiler/v2_compat.py"}], "after_files": [{"content": "# Copyright 2019 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom kubernetes import client as k8s_client\nfrom typing import Callable, Dict, Optional, Text\nfrom kfp.dsl._container_op import BaseOp, ContainerOp\n\ndef add_pod_env(op: BaseOp) -> BaseOp:\n \"\"\"Adds environment info if the Pod has the label `add-pod-env = true`.\n \"\"\"\n if isinstance(\n op, ContainerOp\n ) and op.pod_labels and 'add-pod-env' in op.pod_labels and op.pod_labels[\n 'add-pod-env'] == 'true':\n return add_kfp_pod_env(op)\n\n\ndef add_kfp_pod_env(op: BaseOp) -> BaseOp:\n \"\"\"Adds KFP pod environment info to the specified ContainerOp.\n \"\"\"\n if not isinstance(op, ContainerOp):\n warnings.warn(\n 'Trying to add default KFP environment variables to an Op that is '\n 'not a ContainerOp. Ignoring request.')\n return op\n\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name='KFP_POD_NAME',\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.name')))\n ).add_env_variable(\n k8s_client.V1EnvVar(name='KFP_POD_UID',\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.uid')))\n ).add_env_variable(\n k8s_client.V1EnvVar(name='KFP_NAMESPACE',\n value_from=k8s_client.V1EnvVarSource(\n field_ref=k8s_client.V1ObjectFieldSelector(\n field_path='metadata.namespace')))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='WORKFLOW_ID',\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['workflows.argoproj.io/workflow']\")))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='KFP_RUN_ID',\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['pipeline/runid']\")))\n ).add_env_variable(\n k8s_client.V1EnvVar(\n name='ENABLE_CACHING',\n value_from=k8s_client.\n V1EnvVarSource(field_ref=k8s_client.V1ObjectFieldSelector(\n field_path=\"metadata.labels['pipelines.kubeflow.org/enable_caching']\")))\n )\n return op\n\n\ndef add_pod_labels(labels: Optional[Dict] = None) -> Callable:\n \"\"\"Adds provided pod labels to each pod.\"\"\"\n\n def _add_pod_labels(task):\n for k, v in labels.items():\n # Only append but not update.\n # This is needed to bypass TFX pipelines/components.\n if k not in task.pod_labels:\n task.add_pod_label(k, v)\n return task\n\n return _add_pod_labels\n", "path": "sdk/python/kfp/compiler/_default_transformers.py"}, {"content": "# Copyright 2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file 
except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility functions for enabling v2-compatible pipelines in v1.\"\"\"\nimport collections\nimport json\nfrom typing import Optional\n\nfrom kfp import dsl\nfrom kfp.compiler import _default_transformers\nfrom kfp.pipeline_spec import pipeline_spec_pb2\nfrom kfp.v2 import compiler\n\nfrom kubernetes import client as k8s_client\n\n_DEFAULT_LAUNCHER_IMAGE = \"gcr.io/ml-pipeline/kfp-launcher:1.6.4\"\n\n\ndef update_op(op: dsl.ContainerOp,\n pipeline_name: dsl.PipelineParam,\n pipeline_root: dsl.PipelineParam,\n launcher_image: Optional[str] = None) -> None:\n \"\"\"Updates the passed in Op for running in v2-compatible mode.\n\n Args:\n op: The Op to update.\n pipeline_spec: The PipelineSpec for the pipeline under which `op`\n runs.\n pipeline_root: The root output directory for pipeline artifacts.\n launcher_image: An optional launcher image. Useful for tests.\n \"\"\"\n op.is_v2 = True\n # Inject the launcher binary and overwrite the entrypoint.\n image_name = launcher_image or _DEFAULT_LAUNCHER_IMAGE\n launcher_container = dsl.UserContainer(name=\"kfp-launcher\",\n image=image_name,\n command=\"/bin/mount_launcher.sh\",\n mirror_volume_mounts=True)\n\n op.add_init_container(launcher_container)\n op.add_volume(k8s_client.V1Volume(name='kfp-launcher'))\n op.add_volume_mount(\n k8s_client.V1VolumeMount(name='kfp-launcher', mount_path='/kfp-launcher'))\n\n # op.command + op.args will have the following sections:\n # 1. args passed to kfp-launcher\n # 2. a separator \"--\"\n # 3. parameters in format \"key1=value1\", \"key2=value2\", ...\n # 4. a separator \"--\" as end of arguments passed to launcher\n # 5. (start of op.args) arguments of the original user program command + args\n #\n # example:\n # - command:\n # - /kfp-launcher/launch\n # - '--mlmd_server_address'\n # - $(METADATA_GRPC_SERVICE_HOST)\n # - '--mlmd_server_port'\n # - $(METADATA_GRPC_SERVICE_PORT)\n # - ... 
# more launcher params\n # - '--pipeline_task_id'\n # - $(KFP_POD_NAME)\n # - '--pipeline_root'\n # - ''\n # - '--' # start of parameter values\n # - first=first\n # - second=second\n # - '--' # start of user command and args\n # args:\n # - sh\n # - '-ec'\n # - |\n # program_path=$(mktemp)\n # printf \"%s\" \"$0\" > \"$program_path\"\n # python3 -u \"$program_path\" \"$@\"\n # - >\n # import json\n # import xxx\n # ...\n op.command = [\n \"/kfp-launcher/launch\",\n \"--mlmd_server_address\",\n \"$(METADATA_GRPC_SERVICE_HOST)\",\n \"--mlmd_server_port\",\n \"$(METADATA_GRPC_SERVICE_PORT)\",\n \"--runtime_info_json\",\n \"$(KFP_V2_RUNTIME_INFO)\",\n \"--container_image\",\n \"$(KFP_V2_IMAGE)\",\n \"--task_name\",\n op.name,\n \"--pipeline_name\",\n pipeline_name,\n \"--run_id\",\n \"$(KFP_RUN_ID)\",\n \"--run_resource\",\n \"workflows.argoproj.io/$(WORKFLOW_ID)\",\n \"--namespace\",\n \"$(KFP_NAMESPACE)\",\n \"--pod_name\",\n \"$(KFP_POD_NAME)\",\n \"--pod_uid\",\n \"$(KFP_POD_UID)\",\n \"--pipeline_root\",\n pipeline_root,\n \"--enable_caching\",\n \"$(ENABLE_CACHING)\",\n ]\n\n # Mount necessary environment variables.\n op.apply(_default_transformers.add_kfp_pod_env)\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name=\"KFP_V2_IMAGE\", value=op.container.image))\n\n config_map_ref = k8s_client.V1ConfigMapEnvSource(\n name='metadata-grpc-configmap', optional=True)\n op.container.add_env_from(\n k8s_client.V1EnvFromSource(config_map_ref=config_map_ref))\n\n op.arguments = list(op.container_spec.command) + list(op.container_spec.args)\n\n runtime_info = {\n \"inputParameters\": collections.OrderedDict(),\n \"inputArtifacts\": collections.OrderedDict(),\n \"outputParameters\": collections.OrderedDict(),\n \"outputArtifacts\": collections.OrderedDict(),\n }\n\n op.command += [\"--\"]\n component_spec = op.component_spec\n for parameter, spec in sorted(\n component_spec.input_definitions.parameters.items()):\n parameter_info = {\n \"type\":\n pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),\n }\n op.command += [f\"{parameter}={op._parameter_arguments[parameter]}\"]\n runtime_info[\"inputParameters\"][parameter] = parameter_info\n op.command += [\"--\"]\n\n for artifact_name, spec in sorted(\n component_spec.input_definitions.artifacts.items()):\n artifact_info = {\n \"metadataPath\": op.input_artifact_paths[artifact_name],\n \"schemaTitle\": spec.artifact_type.schema_title,\n \"instanceSchema\": spec.artifact_type.instance_schema,\n }\n runtime_info[\"inputArtifacts\"][artifact_name] = artifact_info\n\n for parameter, spec in sorted(\n component_spec.output_definitions.parameters.items()):\n parameter_info = {\n \"type\":\n pipeline_spec_pb2.PrimitiveType.PrimitiveTypeEnum.Name(spec.type),\n \"path\":\n op.file_outputs[parameter],\n }\n runtime_info[\"outputParameters\"][parameter] = parameter_info\n\n for artifact_name, spec in sorted(\n component_spec.output_definitions.artifacts.items()):\n # TODO: Assert instance_schema.\n artifact_info = {\n # Type used to register output artifacts.\n \"schemaTitle\": spec.artifact_type.schema_title,\n \"instanceSchema\": spec.artifact_type.instance_schema,\n # File used to write out the registered artifact ID.\n \"metadataPath\": op.file_outputs[artifact_name],\n }\n runtime_info[\"outputArtifacts\"][artifact_name] = artifact_info\n\n op.container.add_env_variable(\n k8s_client.V1EnvVar(name=\"KFP_V2_RUNTIME_INFO\",\n value=json.dumps(runtime_info)))\n\n op.pod_annotations['pipelines.kubeflow.org/v2_component'] = \"true\"\n 
op.pod_labels['pipelines.kubeflow.org/v2_component']= \"true\"\n", "path": "sdk/python/kfp/compiler/v2_compat.py"}]} | 3,107 | 530 |
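
For context on the patch in the row above: every new launcher flag resolves from pod metadata that `add_kfp_pod_env` now injects through the Kubernetes downward API. A minimal sketch of how a process inside such a pod would read those values follows; the environment variable names are the ones added in the diff, while the fallback strings and the final print are purely illustrative.

```python
# Hedged sketch: resolving the patched launcher flags from the pod environment.
# KFP_RUN_ID and KFP_POD_UID are the variables the diff adds; the rest already existed.
import os

run_id = os.environ.get("KFP_RUN_ID", "<unset>")          # metadata.labels['pipeline/runid']
run_resource = "workflows.argoproj.io/" + os.environ.get("WORKFLOW_ID", "<unset>")
namespace = os.environ.get("KFP_NAMESPACE", "<unset>")
pod_name = os.environ.get("KFP_POD_NAME", "<unset>")
pod_uid = os.environ.get("KFP_POD_UID", "<unset>")        # metadata.uid

print(run_id, run_resource, namespace, pod_name, pod_uid)
```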
gh_patches_debug_12946 | rasdani/github-patches | git_diff | awslabs__gluonts-1884 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeepAR with NegativeBinomial cannot generate values above 1e6
## Description
A DeepAR model with NegativeBinomial output distribution cannot generate values significantly above 1e6.
## To Reproduce
I attach a Jupyter notebook where I generate an artificial time series with values between 0 and 1e8, train a model, and plot the forecast. I compressed the notebook with zip, as .ipynb files are not supported as attachments.
[1e6.ipynb.zip](https://github.com/awslabs/gluon-ts/files/8069187/1e6.ipynb.zip)
## Error message or code output
Please see the attached notebook.

## Environment
- Operating system: Ubuntu 20.04, linux kernel 5.13.0-28-generic
- Python version: 3.8.10
- GluonTS version: 0.8.1
- MXNet version: 1.9.0
I vaguely recall that
I observed this issue already in gluonts versions 0.4.x.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/mx/distribution/neg_binomial.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 from typing import Dict, List, Optional, Tuple
15
16 import numpy as np
17
18 from gluonts.core.component import validated
19 from gluonts.mx import Tensor
20
21 from .deterministic import DeterministicOutput
22 from .distribution import Distribution, _sample_multiple, getF, softplus
23 from .distribution_output import DistributionOutput
24 from .mixture import MixtureDistributionOutput
25
26
27 class NegativeBinomial(Distribution):
28 r"""
29 Negative binomial distribution, i.e. the distribution of the number of
30 successes in a sequence of independent Bernoulli trials.
31
32 Parameters
33 ----------
34 mu
35 Tensor containing the means, of shape `(*batch_shape, *event_shape)`.
36 alpha
37 Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.
38 F
39 """
40
41 is_reparameterizable = False
42
43 @validated()
44 def __init__(self, mu: Tensor, alpha: Tensor) -> None:
45 self.mu = mu
46 self.alpha = alpha
47
48 @property
49 def F(self):
50 return getF(self.mu)
51
52 @property
53 def batch_shape(self) -> Tuple:
54 return self.mu.shape
55
56 @property
57 def event_shape(self) -> Tuple:
58 return ()
59
60 @property
61 def event_dim(self) -> int:
62 return 0
63
64 def log_prob(self, x: Tensor) -> Tensor:
65 alphaInv = 1.0 / self.alpha
66 alpha_times_mu = self.alpha * self.mu
67 F = self.F
68 ll = (
69 x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))
70 - alphaInv * F.log1p(alpha_times_mu)
71 + F.gammaln(x + alphaInv)
72 - F.gammaln(x + 1.0)
73 - F.gammaln(alphaInv)
74 )
75 return ll
76
77 @property
78 def mean(self) -> Tensor:
79 return self.mu
80
81 @property
82 def stddev(self) -> Tensor:
83 return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))
84
85 def sample(
86 self, num_samples: Optional[int] = None, dtype=np.float32
87 ) -> Tensor:
88 def s(mu: Tensor, alpha: Tensor) -> Tensor:
89 F = self.F
90 tol = 1e-5
91 r = 1.0 / alpha
92 theta = alpha * mu
93 r = F.minimum(F.maximum(tol, r), 1e10)
94 theta = F.minimum(F.maximum(tol, theta), 1e10)
95 x = F.minimum(F.random.gamma(r, theta), 1e6)
96 return F.random.poisson(lam=x, dtype=dtype)
97
98 return _sample_multiple(
99 s, mu=self.mu, alpha=self.alpha, num_samples=num_samples
100 )
101
102 @property
103 def args(self) -> List:
104 return [self.mu, self.alpha]
105
106
107 class NegativeBinomialOutput(DistributionOutput):
108 args_dim: Dict[str, int] = {"mu": 1, "alpha": 1}
109 distr_cls: type = NegativeBinomial
110
111 @classmethod
112 def domain_map(cls, F, mu, alpha):
113 epsilon = np.finfo(cls._dtype).eps # machine epsilon
114
115 mu = softplus(F, mu) + epsilon
116 alpha = softplus(F, alpha) + epsilon
117 return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)
118
119 # Overwrites the parent class method.
120 # We cannot scale using the affine transformation since negative binomial should return integers.
121 # Instead we scale the parameters.
122 def distribution(
123 self,
124 distr_args,
125 loc: Optional[Tensor] = None,
126 scale: Optional[Tensor] = None,
127 ) -> NegativeBinomial:
128 mu, alpha = distr_args
129 if scale is None:
130 return NegativeBinomial(mu, alpha)
131 else:
132 F = getF(mu)
133 mu = F.broadcast_mul(mu, scale)
134 return NegativeBinomial(mu, alpha, F)
135
136 @property
137 def event_shape(self) -> Tuple:
138 return ()
139
140
141 def ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:
142 return MixtureDistributionOutput(
143 distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]
144 )
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gluonts/mx/distribution/neg_binomial.py b/src/gluonts/mx/distribution/neg_binomial.py
--- a/src/gluonts/mx/distribution/neg_binomial.py
+++ b/src/gluonts/mx/distribution/neg_binomial.py
@@ -87,13 +87,9 @@
) -> Tensor:
def s(mu: Tensor, alpha: Tensor) -> Tensor:
F = self.F
- tol = 1e-5
r = 1.0 / alpha
theta = alpha * mu
- r = F.minimum(F.maximum(tol, r), 1e10)
- theta = F.minimum(F.maximum(tol, theta), 1e10)
- x = F.minimum(F.random.gamma(r, theta), 1e6)
- return F.random.poisson(lam=x, dtype=dtype)
+ return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)
return _sample_multiple(
s, mu=self.mu, alpha=self.alpha, num_samples=num_samples
| {"golden_diff": "diff --git a/src/gluonts/mx/distribution/neg_binomial.py b/src/gluonts/mx/distribution/neg_binomial.py\n--- a/src/gluonts/mx/distribution/neg_binomial.py\n+++ b/src/gluonts/mx/distribution/neg_binomial.py\n@@ -87,13 +87,9 @@\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n- tol = 1e-5\n r = 1.0 / alpha\n theta = alpha * mu\n- r = F.minimum(F.maximum(tol, r), 1e10)\n- theta = F.minimum(F.maximum(tol, theta), 1e10)\n- x = F.minimum(F.random.gamma(r, theta), 1e6)\n- return F.random.poisson(lam=x, dtype=dtype)\n+ return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)\n \n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n", "issue": "DeepAR with NegativeBinomial cannot generate values above 1e6\n## Description\r\nA DeepAR model with NegativeBinomial output distribution cannot generate values significantly above 1e6.\r\n\r\n## To Reproduce\r\nI attach a jupyter notebook where I generate artificial timeseries with values between 0 and 1e8, train a model and plot the forecast. I compressed the notebook with zip as .ipynb files are not supported as attachments.\r\n\r\n[1e6.ipynb.zip](https://github.com/awslabs/gluon-ts/files/8069187/1e6.ipynb.zip)\r\n\r\n## Error message or code output\r\nPlease see the attached notebook.\r\n\r\n\r\n\r\n## Environment\r\n- Operating system: Ubuntu 20.04, linux kernel 5.13.0-28-generic\r\n- Python version: 3.8.10\r\n- GluonTS version: 0.8.1\r\n- MXNet version: 1.9.0\r\n\r\nI vaguely recall that \r\nI observed this issue alredy in gluonts versions 0.4.x.\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom gluonts.core.component import validated\nfrom gluonts.mx import Tensor\n\nfrom .deterministic import DeterministicOutput\nfrom .distribution import Distribution, _sample_multiple, getF, softplus\nfrom .distribution_output import DistributionOutput\nfrom .mixture import MixtureDistributionOutput\n\n\nclass NegativeBinomial(Distribution):\n r\"\"\"\n Negative binomial distribution, i.e. 
the distribution of the number of\n successes in a sequence of independent Bernoulli trials.\n\n Parameters\n ----------\n mu\n Tensor containing the means, of shape `(*batch_shape, *event_shape)`.\n alpha\n Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, mu: Tensor, alpha: Tensor) -> None:\n self.mu = mu\n self.alpha = alpha\n\n @property\n def F(self):\n return getF(self.mu)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.mu.shape\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n def log_prob(self, x: Tensor) -> Tensor:\n alphaInv = 1.0 / self.alpha\n alpha_times_mu = self.alpha * self.mu\n F = self.F\n ll = (\n x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))\n - alphaInv * F.log1p(alpha_times_mu)\n + F.gammaln(x + alphaInv)\n - F.gammaln(x + 1.0)\n - F.gammaln(alphaInv)\n )\n return ll\n\n @property\n def mean(self) -> Tensor:\n return self.mu\n\n @property\n def stddev(self) -> Tensor:\n return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))\n\n def sample(\n self, num_samples: Optional[int] = None, dtype=np.float32\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n tol = 1e-5\n r = 1.0 / alpha\n theta = alpha * mu\n r = F.minimum(F.maximum(tol, r), 1e10)\n theta = F.minimum(F.maximum(tol, theta), 1e10)\n x = F.minimum(F.random.gamma(r, theta), 1e6)\n return F.random.poisson(lam=x, dtype=dtype)\n\n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n )\n\n @property\n def args(self) -> List:\n return [self.mu, self.alpha]\n\n\nclass NegativeBinomialOutput(DistributionOutput):\n args_dim: Dict[str, int] = {\"mu\": 1, \"alpha\": 1}\n distr_cls: type = NegativeBinomial\n\n @classmethod\n def domain_map(cls, F, mu, alpha):\n epsilon = np.finfo(cls._dtype).eps # machine epsilon\n\n mu = softplus(F, mu) + epsilon\n alpha = softplus(F, alpha) + epsilon\n return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)\n\n # Overwrites the parent class method.\n # We cannot scale using the affine transformation since negative binomial should return integers.\n # Instead we scale the parameters.\n def distribution(\n self,\n distr_args,\n loc: Optional[Tensor] = None,\n scale: Optional[Tensor] = None,\n ) -> NegativeBinomial:\n mu, alpha = distr_args\n if scale is None:\n return NegativeBinomial(mu, alpha)\n else:\n F = getF(mu)\n mu = F.broadcast_mul(mu, scale)\n return NegativeBinomial(mu, alpha, F)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n\ndef ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:\n return MixtureDistributionOutput(\n distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]\n )\n", "path": "src/gluonts/mx/distribution/neg_binomial.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nfrom gluonts.core.component import validated\nfrom gluonts.mx import Tensor\n\nfrom .deterministic import DeterministicOutput\nfrom .distribution import Distribution, _sample_multiple, getF, softplus\nfrom .distribution_output import DistributionOutput\nfrom .mixture import MixtureDistributionOutput\n\n\nclass NegativeBinomial(Distribution):\n r\"\"\"\n Negative binomial distribution, i.e. the distribution of the number of\n successes in a sequence of independent Bernoulli trials.\n\n Parameters\n ----------\n mu\n Tensor containing the means, of shape `(*batch_shape, *event_shape)`.\n alpha\n Tensor of the shape parameters, of shape `(*batch_shape, *event_shape)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n @validated()\n def __init__(self, mu: Tensor, alpha: Tensor) -> None:\n self.mu = mu\n self.alpha = alpha\n\n @property\n def F(self):\n return getF(self.mu)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.mu.shape\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n def log_prob(self, x: Tensor) -> Tensor:\n alphaInv = 1.0 / self.alpha\n alpha_times_mu = self.alpha * self.mu\n F = self.F\n ll = (\n x * F.log(alpha_times_mu / (1.0 + alpha_times_mu))\n - alphaInv * F.log1p(alpha_times_mu)\n + F.gammaln(x + alphaInv)\n - F.gammaln(x + 1.0)\n - F.gammaln(alphaInv)\n )\n return ll\n\n @property\n def mean(self) -> Tensor:\n return self.mu\n\n @property\n def stddev(self) -> Tensor:\n return self.F.sqrt(self.mu * (1.0 + self.mu * self.alpha))\n\n def sample(\n self, num_samples: Optional[int] = None, dtype=np.float32\n ) -> Tensor:\n def s(mu: Tensor, alpha: Tensor) -> Tensor:\n F = self.F\n r = 1.0 / alpha\n theta = alpha * mu\n return F.random.poisson(lam=F.random.gamma(r, theta), dtype=dtype)\n\n return _sample_multiple(\n s, mu=self.mu, alpha=self.alpha, num_samples=num_samples\n )\n\n @property\n def args(self) -> List:\n return [self.mu, self.alpha]\n\n\nclass NegativeBinomialOutput(DistributionOutput):\n args_dim: Dict[str, int] = {\"mu\": 1, \"alpha\": 1}\n distr_cls: type = NegativeBinomial\n\n @classmethod\n def domain_map(cls, F, mu, alpha):\n epsilon = np.finfo(cls._dtype).eps # machine epsilon\n\n mu = softplus(F, mu) + epsilon\n alpha = softplus(F, alpha) + epsilon\n return mu.squeeze(axis=-1), alpha.squeeze(axis=-1)\n\n # Overwrites the parent class method.\n # We cannot scale using the affine transformation since negative binomial should return integers.\n # Instead we scale the parameters.\n def distribution(\n self,\n distr_args,\n loc: Optional[Tensor] = None,\n scale: Optional[Tensor] = None,\n ) -> NegativeBinomial:\n mu, alpha = distr_args\n if scale is None:\n return NegativeBinomial(mu, alpha)\n else:\n F = getF(mu)\n mu = F.broadcast_mul(mu, scale)\n return NegativeBinomial(mu, alpha, F)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n\ndef ZeroInflatedNegativeBinomialOutput() -> MixtureDistributionOutput:\n return MixtureDistributionOutput(\n distr_outputs=[NegativeBinomialOutput(), DeterministicOutput(0)]\n )\n", "path": "src/gluonts/mx/distribution/neg_binomial.py"}]} | 1,978 | 247 |
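
The clamp removed by the diff in the row above is what capped DeepAR samples near 1e6: the Poisson rate drawn from the gamma distribution was truncated at 1e6 before sampling. A small NumPy sketch of the same gamma-Poisson path makes this visible; the mu and alpha values are arbitrary illustration choices, picked only so the true mean sits far above 1e6.

```python
# Sketch of the gamma-Poisson mixture behind NegativeBinomial.sample.
# mu and alpha below are made-up illustration values, not from the issue.
import numpy as np

rng = np.random.default_rng(0)
mu, alpha = 5e7, 0.1
r, theta = 1.0 / alpha, alpha * mu            # same parameterization as the class

lam = rng.gamma(shape=r, scale=theta, size=100_000)

capped = rng.poisson(np.minimum(lam, 1e6))    # old code path: samples stall near 1e6
free = rng.poisson(lam)                       # patched code path: mean tracks mu

print(int(capped.max()), int(free.mean()))    # roughly 1e6 versus roughly 5e7
```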
gh_patches_debug_16813 | rasdani/github-patches | git_diff | nautobot__nautobot-5593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plugins not loaded with Gunicorn
### Environment
* Nautobot version (Docker tag too if applicable): 2.2.1
* Python version: 3.11
* Database platform, version: psql
* Middleware(s):
### Steps to Reproduce
1. Use systemd
2. With gunicorn 21.2.0 or 22.0.0
<!-- What did you expect to happen? -->
### Expected Behavior
All applications to show
### Observed Behavior
We attempted to upgrade our dev environment from 2.1.9 to 2.2.1 but are hitting a weird issue where our plugins are reported as missing. We are only loading 1 or 2 basic plugins right now while we work on updating all our other plugins for 2.x. Oddly we are only seeing this issue on 1 out of 3 identical servers with identical Nautobot installs.
This looks very much like this issue from 2021: [Plugin Load Failure · Issue #95 · nautobot/nautobot (github.com)](https://github.com/nautobot/nautobot/issues/95)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/wsgi.py`
Content:
```
1 import logging
2 import os
3
4 from django.core import cache
5 from django.core.wsgi import get_wsgi_application
6 from django.db import connections
7
8 os.environ["DJANGO_SETTINGS_MODULE"] = "nautobot_config"
9
10 # Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,
11 # will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded
12 # already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.
13 try:
14 import uwsgidecorators
15
16 @uwsgidecorators.postfork
17 def fix_uwsgi():
18 import uwsgi
19
20 logging.getLogger(__name__).info(
21 f"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ..."
22 )
23 connections.close_all()
24 cache.close_caches()
25
26 except ImportError:
27 pass
28
29 application = get_wsgi_application()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/wsgi.py b/nautobot/core/wsgi.py
--- a/nautobot/core/wsgi.py
+++ b/nautobot/core/wsgi.py
@@ -1,11 +1,18 @@
import logging
-import os
from django.core import cache
from django.core.wsgi import get_wsgi_application
from django.db import connections
-os.environ["DJANGO_SETTINGS_MODULE"] = "nautobot_config"
+import nautobot
+
+# This is the Django default left here for visibility on how the Nautobot pattern
+# differs.
+# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nautobot.core.settings")
+
+# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,
+# we're using the custom Nautobot loader code to read environment or config path for us.
+nautobot.setup()
# Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,
# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded
| {"golden_diff": "diff --git a/nautobot/core/wsgi.py b/nautobot/core/wsgi.py\n--- a/nautobot/core/wsgi.py\n+++ b/nautobot/core/wsgi.py\n@@ -1,11 +1,18 @@\n import logging\n-import os\n \n from django.core import cache\n from django.core.wsgi import get_wsgi_application\n from django.db import connections\n \n-os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"nautobot_config\"\n+import nautobot\n+\n+# This is the Django default left here for visibility on how the Nautobot pattern\n+# differs.\n+# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"nautobot.core.settings\")\n+\n+# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,\n+# we're using the custom Nautobot loader code to read environment or config path for us.\n+nautobot.setup()\n \n # Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,\n # will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n", "issue": "Plugins not loaded with Gunicorn\n\r\n### Environment\r\n\r\n* Nautobot version (Docker tag too if applicable): 2.2.1\r\n* Python version: 3.11\r\n* Database platform, version: psql\r\n* Middleware(s):\r\n\r\n\r\n### Steps to Reproduce\r\n1. Use systemd\r\n2. With gunicorn 21.2.0 or 22.0.0\r\n\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nAll applications to show \r\n\r\n### Observed Behavior\r\n\r\nWe attempted to upgrade our dev environment from 2.1.9 to 2.2.1 but are hitting a weird issue where our plugins are reported as missing. We are only loading 1 or 2 basic plugins right now while we work on updating all our other plugins for 2.x. Oddly we are only seeing this issue on 1 out of 3 identical servers with identical Nautobot installs.\r\n\r\nThis looks very much like this issue from 2021: [Plugin Load Failure \u00b7 Issue #95 \u00b7 nautobot/nautobot (github.com)](https://github.com/nautobot/nautobot/issues/95)\n", "before_files": [{"content": "import logging\nimport os\n\nfrom django.core import cache\nfrom django.core.wsgi import get_wsgi_application\nfrom django.db import connections\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"nautobot_config\"\n\n# Use try/except because we might not be running uWSGI. If `settings.WEBSERVER_WARMUP` is `True`,\n# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n# already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.\ntry:\n import uwsgidecorators\n\n @uwsgidecorators.postfork\n def fix_uwsgi():\n import uwsgi\n\n logging.getLogger(__name__).info(\n f\"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ...\"\n )\n connections.close_all()\n cache.close_caches()\n\nexcept ImportError:\n pass\n\napplication = get_wsgi_application()\n", "path": "nautobot/core/wsgi.py"}], "after_files": [{"content": "import logging\n\nfrom django.core import cache\nfrom django.core.wsgi import get_wsgi_application\nfrom django.db import connections\n\nimport nautobot\n\n# This is the Django default left here for visibility on how the Nautobot pattern\n# differs.\n# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"nautobot.core.settings\")\n\n# Instead of just pointing to `DJANGO_SETTINGS_MODULE` and letting Django run with it,\n# we're using the custom Nautobot loader code to read environment or config path for us.\nnautobot.setup()\n\n# Use try/except because we might not be running uWSGI. 
If `settings.WEBSERVER_WARMUP` is `True`,\n# will first call `get_internal_wsgi_application` which does not have `uwsgi` module loaded\n# already. Therefore, `settings.WEBSERVER_WARMUP` to `False` for this code to be loaded.\ntry:\n import uwsgidecorators\n\n @uwsgidecorators.postfork\n def fix_uwsgi():\n import uwsgi\n\n logging.getLogger(__name__).info(\n f\"Closing existing DB and cache connections on worker {uwsgi.worker_id()} after uWSGI forked ...\"\n )\n connections.close_all()\n cache.close_caches()\n\nexcept ImportError:\n pass\n\napplication = get_wsgi_application()\n", "path": "nautobot/core/wsgi.py"}]} | 779 | 239 |
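
A quick way to sanity-check the wsgi.py change in the row above, assuming a standard Nautobot install whose `nautobot_config.py` defines a `PLUGINS` list (that setting name is the usual Nautobot convention and is not stated in this row): importing the patched module should run `nautobot.setup()`, so the user config and its plugins are loaded before any Gunicorn worker forks.

```python
# Hedged sketch: verify that importing the patched WSGI module configures Nautobot.
from nautobot.core.wsgi import application  # noqa: F401  (import side effect runs nautobot.setup())

from django.conf import settings

print(settings.PLUGINS)  # expected to list the plugins that were reported as missing
```

Pointing Gunicorn at the same target, for example `gunicorn nautobot.core.wsgi:application`, then serves an application whose settings were loaded through Nautobot's own config loader rather than a hard-coded `DJANGO_SETTINGS_MODULE`.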
gh_patches_debug_22558 | rasdani/github-patches | git_diff | sublimelsp__LSP-925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forced-break in hover popup can break syntax highlighting
Using: OSX / typescript-language-server
A line-break that LSP forces in the popup can cause syntax highlighting to break, for example when it breaks a plain string in JS syntax.
<img width="512" alt="line-break" src="https://user-images.githubusercontent.com/153197/72525594-cfa7ff00-3864-11ea-9e8a-c183e07995a1.png">
Notice that in the screenshot the whole string should have a yellow color. Syntax highlighting breaks because a line break within a string is not valid syntax.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/hover.py`
Content:
```
1 import mdpopups
2 import sublime
3 import sublime_plugin
4 import webbrowser
5 import os
6 import textwrap
7 from html import escape
8 from .code_actions import actions_manager, run_code_action_or_command
9 from .code_actions import CodeActionOrCommand
10 from .core.configurations import is_supported_syntax
11 from .core.popups import popups
12 from .core.protocol import Request, DiagnosticSeverity, Diagnostic, DiagnosticRelatedInformation, Point
13 from .core.registry import session_for_view, LspTextCommand, windows
14 from .core.settings import client_configs, settings
15 from .core.typing import List, Optional, Any, Dict
16 from .core.views import text_document_position_params
17 from .diagnostics import filter_by_point, view_diagnostics
18
19
20 SUBLIME_WORD_MASK = 515
21
22
23 class HoverHandler(sublime_plugin.ViewEventListener):
24 def __init__(self, view: sublime.View) -> None:
25 self.view = view
26
27 @classmethod
28 def is_applicable(cls, view_settings: dict) -> bool:
29 if 'hover' in settings.disabled_capabilities:
30 return False
31 syntax = view_settings.get('syntax')
32 if syntax:
33 return is_supported_syntax(syntax, client_configs.all)
34 else:
35 return False
36
37 def on_hover(self, point: int, hover_zone: int) -> None:
38 if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():
39 return
40 self.view.run_command("lsp_hover", {"point": point})
41
42
43 _test_contents = [] # type: List[str]
44
45
46 class_for_severity = {
47 DiagnosticSeverity.Error: 'errors',
48 DiagnosticSeverity.Warning: 'warnings',
49 DiagnosticSeverity.Information: 'info',
50 DiagnosticSeverity.Hint: 'hints'
51 }
52
53
54 class GotoKind:
55
56 __slots__ = ("lsp_name", "label", "subl_cmd_name")
57
58 def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:
59 self.lsp_name = lsp_name
60 self.label = label
61 self.subl_cmd_name = subl_cmd_name
62
63
64 goto_kinds = [
65 GotoKind("definition", "Definition", "definition"),
66 GotoKind("typeDefinition", "Type Definition", "type_definition"),
67 GotoKind("declaration", "Declaration", "declaration"),
68 GotoKind("implementation", "Implementation", "implementation")
69 ]
70
71
72 class LspHoverCommand(LspTextCommand):
73 def __init__(self, view: sublime.View) -> None:
74 super().__init__(view)
75 self._base_dir = None # type: Optional[str]
76
77 def is_likely_at_symbol(self, point: int) -> bool:
78 word_at_sel = self.view.classify(point)
79 return bool(word_at_sel & SUBLIME_WORD_MASK)
80
81 def run(self, edit: sublime.Edit, point: Optional[int] = None) -> None:
82 hover_point = point or self.view.sel()[0].begin()
83 self._base_dir = windows.lookup(self.view.window()).get_project_path(self.view.file_name() or "")
84
85 self._hover = None # type: Optional[Any]
86 self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]
87 self._diagnostics_by_config = {} # type: Dict[str, List[Diagnostic]]
88
89 if self.is_likely_at_symbol(hover_point):
90 self.request_symbol_hover(hover_point)
91
92 self._diagnostics_by_config = filter_by_point(view_diagnostics(self.view),
93 Point(*self.view.rowcol(hover_point)))
94 if self._diagnostics_by_config:
95 self.request_code_actions(hover_point)
96 self.request_show_hover(hover_point)
97
98 def request_symbol_hover(self, point: int) -> None:
99 # todo: session_for_view looks up windowmanager twice (config and for sessions)
100 # can we memoize some part (eg. where no point is provided?)
101 session = session_for_view(self.view, 'hoverProvider', point)
102 if session:
103 document_position = text_document_position_params(self.view, point)
104 if session.client:
105 session.client.send_request(
106 Request.hover(document_position),
107 lambda response: self.handle_response(response, point))
108
109 def request_code_actions(self, point: int) -> None:
110 actions_manager.request(self.view, point, lambda response: self.handle_code_actions(response, point),
111 self._diagnostics_by_config)
112
113 def handle_code_actions(self, responses: Dict[str, List[CodeActionOrCommand]], point: int) -> None:
114 self._actions_by_config = responses
115 self.request_show_hover(point)
116
117 def handle_response(self, response: Optional[Any], point: int) -> None:
118 self._hover = response
119 self.request_show_hover(point)
120
121 def symbol_actions_content(self) -> str:
122 actions = []
123 for goto_kind in goto_kinds:
124 if self.has_client_with_capability(goto_kind.lsp_name + "Provider"):
125 actions.append("<a href='{}'>{}</a>".format(goto_kind.lsp_name, goto_kind.label))
126 if self.has_client_with_capability('referencesProvider'):
127 actions.append("<a href='{}'>{}</a>".format('references', 'References'))
128 if self.has_client_with_capability('renameProvider'):
129 actions.append("<a href='{}'>{}</a>".format('rename', 'Rename'))
130 return "<p>" + " | ".join(actions) + "</p>"
131
132 def format_diagnostic_related_info(self, info: DiagnosticRelatedInformation) -> str:
133 file_path = info.location.file_path
134 if self._base_dir and file_path.startswith(self._base_dir):
135 file_path = os.path.relpath(file_path, self._base_dir)
136 location = "{}:{}:{}".format(file_path, info.location.range.start.row+1, info.location.range.start.col+1)
137 return "<a href='location:{}'>{}</a>: {}".format(location, location, escape(info.message))
138
139 def format_diagnostic(self, diagnostic: 'Diagnostic') -> str:
140 diagnostic_message = escape(diagnostic.message, False).replace('\n', '<br>')
141 related_infos = [self.format_diagnostic_related_info(info) for info in diagnostic.related_info]
142 related_content = "<pre class='related_info'>" + "<br>".join(related_infos) + "</pre>" if related_infos else ""
143
144 if diagnostic.source:
145 return "<pre class=\"{}\">[{}] {}{}</pre>".format(class_for_severity[diagnostic.severity],
146 diagnostic.source, diagnostic_message, related_content)
147 else:
148 return "<pre class=\"{}\">{}{}</pre>".format(class_for_severity[diagnostic.severity], diagnostic_message,
149 related_content)
150
151 def diagnostics_content(self) -> str:
152 formatted = []
153 for config_name in self._diagnostics_by_config:
154 by_severity = {} # type: Dict[int, List[str]]
155 formatted.append("<div class='diagnostics'>")
156 for diagnostic in self._diagnostics_by_config[config_name]:
157 by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))
158
159 for severity, items in by_severity.items():
160 formatted.append("<div>")
161 formatted.extend(items)
162 formatted.append("</div>")
163
164 if config_name in self._actions_by_config:
165 action_count = len(self._actions_by_config[config_name])
166 if action_count > 0:
167 formatted.append("<div class=\"actions\"><a href='{}:{}'>{} ({})</a></div>".format(
168 'code-actions', config_name, 'Code Actions', action_count))
169
170 formatted.append("</div>")
171
172 return "".join(formatted)
173
174 def hover_content(self) -> str:
175 contents = [] # type: List[Any]
176 if isinstance(self._hover, dict):
177 response_content = self._hover.get('contents')
178 if response_content:
179 if isinstance(response_content, list):
180 contents = response_content
181 else:
182 contents = [response_content]
183
184 formatted = []
185 for item in contents:
186 value = ""
187 language = None
188 if isinstance(item, str):
189 value = item
190 else:
191 value = item.get("value")
192 language = item.get("language")
193
194 if '\n' not in value:
195 value = "\n".join(textwrap.wrap(value, 80))
196
197 if language:
198 formatted.append("```{}\n{}\n```\n".format(language, value))
199 else:
200 formatted.append(value)
201
202 if formatted:
203 return mdpopups.md2html(self.view, "\n".join(formatted))
204
205 return ""
206
207 def request_show_hover(self, point: int) -> None:
208 sublime.set_timeout(lambda: self.show_hover(point), 50)
209
210 def show_hover(self, point: int) -> None:
211 contents = self.diagnostics_content() + self.hover_content()
212 if contents and settings.show_symbol_action_links:
213 contents += self.symbol_actions_content()
214
215 _test_contents.clear()
216 _test_contents.append(contents) # for testing only
217
218 if contents:
219 mdpopups.show_popup(
220 self.view,
221 contents,
222 css=popups.stylesheet,
223 md=False,
224 flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
225 location=point,
226 wrapper_class=popups.classname,
227 max_width=800,
228 on_navigate=lambda href: self.on_hover_navigate(href, point))
229
230 def on_hover_navigate(self, href: str, point: int) -> None:
231 for goto_kind in goto_kinds:
232 if href == goto_kind.lsp_name:
233 self.run_command_from_point(point, "lsp_symbol_" + goto_kind.subl_cmd_name)
234 return
235 if href == 'references':
236 self.run_command_from_point(point, "lsp_symbol_references")
237 elif href == 'rename':
238 self.run_command_from_point(point, "lsp_symbol_rename")
239 elif href.startswith('code-actions'):
240 _, config_name = href.split(":")
241 titles = [command["title"] for command in self._actions_by_config[config_name]]
242 sel = self.view.sel()
243 sel.clear()
244 sel.add(sublime.Region(point, point))
245
246 self.view.show_popup_menu(titles, lambda i: self.handle_code_action_select(config_name, i))
247 elif href.startswith('location'):
248 _, file_path, location = href.split(":", 2)
249 file_path = os.path.join(self._base_dir, file_path) if self._base_dir else file_path
250 window = self.view.window()
251 if window:
252 window.open_file(file_path + ":" + location, sublime.ENCODED_POSITION | sublime.TRANSIENT)
253 else:
254 webbrowser.open_new_tab(href)
255
256 def handle_code_action_select(self, config_name: str, index: int) -> None:
257 if index > -1:
258 selected = self._actions_by_config[config_name][index]
259 run_code_action_or_command(self.view, config_name, selected)
260
261 def run_command_from_point(self, point: int, command_name: str, args: Optional[Any] = None) -> None:
262 sel = self.view.sel()
263 sel.clear()
264 sel.add(sublime.Region(point, point))
265 self.view.run_command(command_name, args)
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/hover.py b/plugin/hover.py
--- a/plugin/hover.py
+++ b/plugin/hover.py
@@ -3,7 +3,6 @@
import sublime_plugin
import webbrowser
import os
-import textwrap
from html import escape
from .code_actions import actions_manager, run_code_action_or_command
from .code_actions import CodeActionOrCommand
@@ -191,16 +190,14 @@
value = item.get("value")
language = item.get("language")
- if '\n' not in value:
- value = "\n".join(textwrap.wrap(value, 80))
-
if language:
formatted.append("```{}\n{}\n```\n".format(language, value))
else:
formatted.append(value)
if formatted:
- return mdpopups.md2html(self.view, "\n".join(formatted))
+ frontmatter_config = mdpopups.format_frontmatter({'allow_code_wrap': True})
+ return mdpopups.md2html(self.view, frontmatter_config + "\n".join(formatted))
return ""
| {"golden_diff": "diff --git a/plugin/hover.py b/plugin/hover.py\n--- a/plugin/hover.py\n+++ b/plugin/hover.py\n@@ -3,7 +3,6 @@\n import sublime_plugin\n import webbrowser\n import os\n-import textwrap\n from html import escape\n from .code_actions import actions_manager, run_code_action_or_command\n from .code_actions import CodeActionOrCommand\n@@ -191,16 +190,14 @@\n value = item.get(\"value\")\n language = item.get(\"language\")\n \n- if '\\n' not in value:\n- value = \"\\n\".join(textwrap.wrap(value, 80))\n-\n if language:\n formatted.append(\"```{}\\n{}\\n```\\n\".format(language, value))\n else:\n formatted.append(value)\n \n if formatted:\n- return mdpopups.md2html(self.view, \"\\n\".join(formatted))\n+ frontmatter_config = mdpopups.format_frontmatter({'allow_code_wrap': True})\n+ return mdpopups.md2html(self.view, frontmatter_config + \"\\n\".join(formatted))\n \n return \"\"\n", "issue": "Forced-break in hover popup can break syntax highlighting\nUsing: OSX / typescript-language-server\r\n\r\nLine-break, that LSP forces in the popup, can cause syntax highlighting to break. For example, if breaking a plain string in JS syntax.\r\n\r\n<img width=\"512\" alt=\"line-break\" src=\"https://user-images.githubusercontent.com/153197/72525594-cfa7ff00-3864-11ea-9e8a-c183e07995a1.png\">\r\n\r\nNotice that in the screenshot the whole string should have a yellow color. Syntax highlighting breaks because line break within a string is not a valid syntax.\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nimport os\nimport textwrap\nfrom html import escape\nfrom .code_actions import actions_manager, run_code_action_or_command\nfrom .code_actions import CodeActionOrCommand\nfrom .core.configurations import is_supported_syntax\nfrom .core.popups import popups\nfrom .core.protocol import Request, DiagnosticSeverity, Diagnostic, DiagnosticRelatedInformation, Point\nfrom .core.registry import session_for_view, LspTextCommand, windows\nfrom .core.settings import client_configs, settings\nfrom .core.typing import List, Optional, Any, Dict\nfrom .core.views import text_document_position_params\nfrom .diagnostics import filter_by_point, view_diagnostics\n\n\nSUBLIME_WORD_MASK = 515\n\n\nclass HoverHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n self.view = view\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'hover' in settings.disabled_capabilities:\n return False\n syntax = view_settings.get('syntax')\n if syntax:\n return is_supported_syntax(syntax, client_configs.all)\n else:\n return False\n\n def on_hover(self, point: int, hover_zone: int) -> None:\n if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():\n return\n self.view.run_command(\"lsp_hover\", {\"point\": point})\n\n\n_test_contents = [] # type: List[str]\n\n\nclass_for_severity = {\n DiagnosticSeverity.Error: 'errors',\n DiagnosticSeverity.Warning: 'warnings',\n DiagnosticSeverity.Information: 'info',\n DiagnosticSeverity.Hint: 'hints'\n}\n\n\nclass GotoKind:\n\n __slots__ = (\"lsp_name\", \"label\", \"subl_cmd_name\")\n\n def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:\n self.lsp_name = lsp_name\n self.label = label\n self.subl_cmd_name = subl_cmd_name\n\n\ngoto_kinds = [\n GotoKind(\"definition\", \"Definition\", \"definition\"),\n GotoKind(\"typeDefinition\", \"Type Definition\", \"type_definition\"),\n GotoKind(\"declaration\", \"Declaration\", 
\"declaration\"),\n GotoKind(\"implementation\", \"Implementation\", \"implementation\")\n]\n\n\nclass LspHoverCommand(LspTextCommand):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._base_dir = None # type: Optional[str]\n\n def is_likely_at_symbol(self, point: int) -> bool:\n word_at_sel = self.view.classify(point)\n return bool(word_at_sel & SUBLIME_WORD_MASK)\n\n def run(self, edit: sublime.Edit, point: Optional[int] = None) -> None:\n hover_point = point or self.view.sel()[0].begin()\n self._base_dir = windows.lookup(self.view.window()).get_project_path(self.view.file_name() or \"\")\n\n self._hover = None # type: Optional[Any]\n self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]\n self._diagnostics_by_config = {} # type: Dict[str, List[Diagnostic]]\n\n if self.is_likely_at_symbol(hover_point):\n self.request_symbol_hover(hover_point)\n\n self._diagnostics_by_config = filter_by_point(view_diagnostics(self.view),\n Point(*self.view.rowcol(hover_point)))\n if self._diagnostics_by_config:\n self.request_code_actions(hover_point)\n self.request_show_hover(hover_point)\n\n def request_symbol_hover(self, point: int) -> None:\n # todo: session_for_view looks up windowmanager twice (config and for sessions)\n # can we memoize some part (eg. where no point is provided?)\n session = session_for_view(self.view, 'hoverProvider', point)\n if session:\n document_position = text_document_position_params(self.view, point)\n if session.client:\n session.client.send_request(\n Request.hover(document_position),\n lambda response: self.handle_response(response, point))\n\n def request_code_actions(self, point: int) -> None:\n actions_manager.request(self.view, point, lambda response: self.handle_code_actions(response, point),\n self._diagnostics_by_config)\n\n def handle_code_actions(self, responses: Dict[str, List[CodeActionOrCommand]], point: int) -> None:\n self._actions_by_config = responses\n self.request_show_hover(point)\n\n def handle_response(self, response: Optional[Any], point: int) -> None:\n self._hover = response\n self.request_show_hover(point)\n\n def symbol_actions_content(self) -> str:\n actions = []\n for goto_kind in goto_kinds:\n if self.has_client_with_capability(goto_kind.lsp_name + \"Provider\"):\n actions.append(\"<a href='{}'>{}</a>\".format(goto_kind.lsp_name, goto_kind.label))\n if self.has_client_with_capability('referencesProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('references', 'References'))\n if self.has_client_with_capability('renameProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('rename', 'Rename'))\n return \"<p>\" + \" | \".join(actions) + \"</p>\"\n\n def format_diagnostic_related_info(self, info: DiagnosticRelatedInformation) -> str:\n file_path = info.location.file_path\n if self._base_dir and file_path.startswith(self._base_dir):\n file_path = os.path.relpath(file_path, self._base_dir)\n location = \"{}:{}:{}\".format(file_path, info.location.range.start.row+1, info.location.range.start.col+1)\n return \"<a href='location:{}'>{}</a>: {}\".format(location, location, escape(info.message))\n\n def format_diagnostic(self, diagnostic: 'Diagnostic') -> str:\n diagnostic_message = escape(diagnostic.message, False).replace('\\n', '<br>')\n related_infos = [self.format_diagnostic_related_info(info) for info in diagnostic.related_info]\n related_content = \"<pre class='related_info'>\" + \"<br>\".join(related_infos) + \"</pre>\" if related_infos else \"\"\n\n if diagnostic.source:\n return 
\"<pre class=\\\"{}\\\">[{}] {}{}</pre>\".format(class_for_severity[diagnostic.severity],\n diagnostic.source, diagnostic_message, related_content)\n else:\n return \"<pre class=\\\"{}\\\">{}{}</pre>\".format(class_for_severity[diagnostic.severity], diagnostic_message,\n related_content)\n\n def diagnostics_content(self) -> str:\n formatted = []\n for config_name in self._diagnostics_by_config:\n by_severity = {} # type: Dict[int, List[str]]\n formatted.append(\"<div class='diagnostics'>\")\n for diagnostic in self._diagnostics_by_config[config_name]:\n by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))\n\n for severity, items in by_severity.items():\n formatted.append(\"<div>\")\n formatted.extend(items)\n formatted.append(\"</div>\")\n\n if config_name in self._actions_by_config:\n action_count = len(self._actions_by_config[config_name])\n if action_count > 0:\n formatted.append(\"<div class=\\\"actions\\\"><a href='{}:{}'>{} ({})</a></div>\".format(\n 'code-actions', config_name, 'Code Actions', action_count))\n\n formatted.append(\"</div>\")\n\n return \"\".join(formatted)\n\n def hover_content(self) -> str:\n contents = [] # type: List[Any]\n if isinstance(self._hover, dict):\n response_content = self._hover.get('contents')\n if response_content:\n if isinstance(response_content, list):\n contents = response_content\n else:\n contents = [response_content]\n\n formatted = []\n for item in contents:\n value = \"\"\n language = None\n if isinstance(item, str):\n value = item\n else:\n value = item.get(\"value\")\n language = item.get(\"language\")\n\n if '\\n' not in value:\n value = \"\\n\".join(textwrap.wrap(value, 80))\n\n if language:\n formatted.append(\"```{}\\n{}\\n```\\n\".format(language, value))\n else:\n formatted.append(value)\n\n if formatted:\n return mdpopups.md2html(self.view, \"\\n\".join(formatted))\n\n return \"\"\n\n def request_show_hover(self, point: int) -> None:\n sublime.set_timeout(lambda: self.show_hover(point), 50)\n\n def show_hover(self, point: int) -> None:\n contents = self.diagnostics_content() + self.hover_content()\n if contents and settings.show_symbol_action_links:\n contents += self.symbol_actions_content()\n\n _test_contents.clear()\n _test_contents.append(contents) # for testing only\n\n if contents:\n mdpopups.show_popup(\n self.view,\n contents,\n css=popups.stylesheet,\n md=False,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popups.classname,\n max_width=800,\n on_navigate=lambda href: self.on_hover_navigate(href, point))\n\n def on_hover_navigate(self, href: str, point: int) -> None:\n for goto_kind in goto_kinds:\n if href == goto_kind.lsp_name:\n self.run_command_from_point(point, \"lsp_symbol_\" + goto_kind.subl_cmd_name)\n return\n if href == 'references':\n self.run_command_from_point(point, \"lsp_symbol_references\")\n elif href == 'rename':\n self.run_command_from_point(point, \"lsp_symbol_rename\")\n elif href.startswith('code-actions'):\n _, config_name = href.split(\":\")\n titles = [command[\"title\"] for command in self._actions_by_config[config_name]]\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n\n self.view.show_popup_menu(titles, lambda i: self.handle_code_action_select(config_name, i))\n elif href.startswith('location'):\n _, file_path, location = href.split(\":\", 2)\n file_path = os.path.join(self._base_dir, file_path) if self._base_dir else file_path\n window = self.view.window()\n if window:\n window.open_file(file_path + 
\":\" + location, sublime.ENCODED_POSITION | sublime.TRANSIENT)\n else:\n webbrowser.open_new_tab(href)\n\n def handle_code_action_select(self, config_name: str, index: int) -> None:\n if index > -1:\n selected = self._actions_by_config[config_name][index]\n run_code_action_or_command(self.view, config_name, selected)\n\n def run_command_from_point(self, point: int, command_name: str, args: Optional[Any] = None) -> None:\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n self.view.run_command(command_name, args)\n", "path": "plugin/hover.py"}], "after_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nimport os\nfrom html import escape\nfrom .code_actions import actions_manager, run_code_action_or_command\nfrom .code_actions import CodeActionOrCommand\nfrom .core.configurations import is_supported_syntax\nfrom .core.popups import popups\nfrom .core.protocol import Request, DiagnosticSeverity, Diagnostic, DiagnosticRelatedInformation, Point\nfrom .core.registry import session_for_view, LspTextCommand, windows\nfrom .core.settings import client_configs, settings\nfrom .core.typing import List, Optional, Any, Dict\nfrom .core.views import text_document_position_params\nfrom .diagnostics import filter_by_point, view_diagnostics\n\n\nSUBLIME_WORD_MASK = 515\n\n\nclass HoverHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n self.view = view\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'hover' in settings.disabled_capabilities:\n return False\n syntax = view_settings.get('syntax')\n if syntax:\n return is_supported_syntax(syntax, client_configs.all)\n else:\n return False\n\n def on_hover(self, point: int, hover_zone: int) -> None:\n if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():\n return\n self.view.run_command(\"lsp_hover\", {\"point\": point})\n\n\n_test_contents = [] # type: List[str]\n\n\nclass_for_severity = {\n DiagnosticSeverity.Error: 'errors',\n DiagnosticSeverity.Warning: 'warnings',\n DiagnosticSeverity.Information: 'info',\n DiagnosticSeverity.Hint: 'hints'\n}\n\n\nclass GotoKind:\n\n __slots__ = (\"lsp_name\", \"label\", \"subl_cmd_name\")\n\n def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:\n self.lsp_name = lsp_name\n self.label = label\n self.subl_cmd_name = subl_cmd_name\n\n\ngoto_kinds = [\n GotoKind(\"definition\", \"Definition\", \"definition\"),\n GotoKind(\"typeDefinition\", \"Type Definition\", \"type_definition\"),\n GotoKind(\"declaration\", \"Declaration\", \"declaration\"),\n GotoKind(\"implementation\", \"Implementation\", \"implementation\")\n]\n\n\nclass LspHoverCommand(LspTextCommand):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._base_dir = None # type: Optional[str]\n\n def is_likely_at_symbol(self, point: int) -> bool:\n word_at_sel = self.view.classify(point)\n return bool(word_at_sel & SUBLIME_WORD_MASK)\n\n def run(self, edit: sublime.Edit, point: Optional[int] = None) -> None:\n hover_point = point or self.view.sel()[0].begin()\n self._base_dir = windows.lookup(self.view.window()).get_project_path(self.view.file_name() or \"\")\n\n self._hover = None # type: Optional[Any]\n self._actions_by_config = {} # type: Dict[str, List[CodeActionOrCommand]]\n self._diagnostics_by_config = {} # type: Dict[str, List[Diagnostic]]\n\n if self.is_likely_at_symbol(hover_point):\n self.request_symbol_hover(hover_point)\n\n 
self._diagnostics_by_config = filter_by_point(view_diagnostics(self.view),\n Point(*self.view.rowcol(hover_point)))\n if self._diagnostics_by_config:\n self.request_code_actions(hover_point)\n self.request_show_hover(hover_point)\n\n def request_symbol_hover(self, point: int) -> None:\n # todo: session_for_view looks up windowmanager twice (config and for sessions)\n # can we memoize some part (eg. where no point is provided?)\n session = session_for_view(self.view, 'hoverProvider', point)\n if session:\n document_position = text_document_position_params(self.view, point)\n if session.client:\n session.client.send_request(\n Request.hover(document_position),\n lambda response: self.handle_response(response, point))\n\n def request_code_actions(self, point: int) -> None:\n actions_manager.request(self.view, point, lambda response: self.handle_code_actions(response, point),\n self._diagnostics_by_config)\n\n def handle_code_actions(self, responses: Dict[str, List[CodeActionOrCommand]], point: int) -> None:\n self._actions_by_config = responses\n self.request_show_hover(point)\n\n def handle_response(self, response: Optional[Any], point: int) -> None:\n self._hover = response\n self.request_show_hover(point)\n\n def symbol_actions_content(self) -> str:\n actions = []\n for goto_kind in goto_kinds:\n if self.has_client_with_capability(goto_kind.lsp_name + \"Provider\"):\n actions.append(\"<a href='{}'>{}</a>\".format(goto_kind.lsp_name, goto_kind.label))\n if self.has_client_with_capability('referencesProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('references', 'References'))\n if self.has_client_with_capability('renameProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('rename', 'Rename'))\n return \"<p>\" + \" | \".join(actions) + \"</p>\"\n\n def format_diagnostic_related_info(self, info: DiagnosticRelatedInformation) -> str:\n file_path = info.location.file_path\n if self._base_dir and file_path.startswith(self._base_dir):\n file_path = os.path.relpath(file_path, self._base_dir)\n location = \"{}:{}:{}\".format(file_path, info.location.range.start.row+1, info.location.range.start.col+1)\n return \"<a href='location:{}'>{}</a>: {}\".format(location, location, escape(info.message))\n\n def format_diagnostic(self, diagnostic: 'Diagnostic') -> str:\n diagnostic_message = escape(diagnostic.message, False).replace('\\n', '<br>')\n related_infos = [self.format_diagnostic_related_info(info) for info in diagnostic.related_info]\n related_content = \"<pre class='related_info'>\" + \"<br>\".join(related_infos) + \"</pre>\" if related_infos else \"\"\n\n if diagnostic.source:\n return \"<pre class=\\\"{}\\\">[{}] {}{}</pre>\".format(class_for_severity[diagnostic.severity],\n diagnostic.source, diagnostic_message, related_content)\n else:\n return \"<pre class=\\\"{}\\\">{}{}</pre>\".format(class_for_severity[diagnostic.severity], diagnostic_message,\n related_content)\n\n def diagnostics_content(self) -> str:\n formatted = []\n for config_name in self._diagnostics_by_config:\n by_severity = {} # type: Dict[int, List[str]]\n formatted.append(\"<div class='diagnostics'>\")\n for diagnostic in self._diagnostics_by_config[config_name]:\n by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))\n\n for severity, items in by_severity.items():\n formatted.append(\"<div>\")\n formatted.extend(items)\n formatted.append(\"</div>\")\n\n if config_name in self._actions_by_config:\n action_count = len(self._actions_by_config[config_name])\n if action_count > 
0:\n formatted.append(\"<div class=\\\"actions\\\"><a href='{}:{}'>{} ({})</a></div>\".format(\n 'code-actions', config_name, 'Code Actions', action_count))\n\n formatted.append(\"</div>\")\n\n return \"\".join(formatted)\n\n def hover_content(self) -> str:\n contents = [] # type: List[Any]\n if isinstance(self._hover, dict):\n response_content = self._hover.get('contents')\n if response_content:\n if isinstance(response_content, list):\n contents = response_content\n else:\n contents = [response_content]\n\n formatted = []\n for item in contents:\n value = \"\"\n language = None\n if isinstance(item, str):\n value = item\n else:\n value = item.get(\"value\")\n language = item.get(\"language\")\n\n if language:\n formatted.append(\"```{}\\n{}\\n```\\n\".format(language, value))\n else:\n formatted.append(value)\n\n if formatted:\n frontmatter_config = mdpopups.format_frontmatter({'allow_code_wrap': True})\n return mdpopups.md2html(self.view, frontmatter_config + \"\\n\".join(formatted))\n\n return \"\"\n\n def request_show_hover(self, point: int) -> None:\n sublime.set_timeout(lambda: self.show_hover(point), 50)\n\n def show_hover(self, point: int) -> None:\n contents = self.diagnostics_content() + self.hover_content()\n if contents and settings.show_symbol_action_links:\n contents += self.symbol_actions_content()\n\n _test_contents.clear()\n _test_contents.append(contents) # for testing only\n\n if contents:\n mdpopups.show_popup(\n self.view,\n contents,\n css=popups.stylesheet,\n md=False,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popups.classname,\n max_width=800,\n on_navigate=lambda href: self.on_hover_navigate(href, point))\n\n def on_hover_navigate(self, href: str, point: int) -> None:\n for goto_kind in goto_kinds:\n if href == goto_kind.lsp_name:\n self.run_command_from_point(point, \"lsp_symbol_\" + goto_kind.subl_cmd_name)\n return\n if href == 'references':\n self.run_command_from_point(point, \"lsp_symbol_references\")\n elif href == 'rename':\n self.run_command_from_point(point, \"lsp_symbol_rename\")\n elif href.startswith('code-actions'):\n _, config_name = href.split(\":\")\n titles = [command[\"title\"] for command in self._actions_by_config[config_name]]\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n\n self.view.show_popup_menu(titles, lambda i: self.handle_code_action_select(config_name, i))\n elif href.startswith('location'):\n _, file_path, location = href.split(\":\", 2)\n file_path = os.path.join(self._base_dir, file_path) if self._base_dir else file_path\n window = self.view.window()\n if window:\n window.open_file(file_path + \":\" + location, sublime.ENCODED_POSITION | sublime.TRANSIENT)\n else:\n webbrowser.open_new_tab(href)\n\n def handle_code_action_select(self, config_name: str, index: int) -> None:\n if index > -1:\n selected = self._actions_by_config[config_name][index]\n run_code_action_or_command(self.view, config_name, selected)\n\n def run_command_from_point(self, point: int, command_name: str, args: Optional[Any] = None) -> None:\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n self.view.run_command(command_name, args)\n", "path": "plugin/hover.py"}]} | 3,507 | 243 |
gh_patches_debug_41200 | rasdani/github-patches | git_diff | mindee__doctr-107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Models] Add SAR teacher forcing during training (decoder)
Teacher forcing (feeding the LSTM decoder of SAR with ground-truth characters during training) is not implemented, and it should really improve performance.
--- END ISSUE ---
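For context, a minimal, self-contained sketch of the teacher-forcing idea in a step-by-step LSTM decoder loop (illustrative only: the layer names, shapes, and start-symbol handling here are assumptions for demonstration, not the doctr API):
```python
# Hypothetical, minimal teacher-forcing decoder loop (not doctr code).
import tensorflow as tf

vocab_size, embedding_units, rnn_units, max_length, batch_size = 10, 16, 32, 5, 2
embed = tf.keras.layers.Dense(embedding_units, use_bias=False)
decoder_cell = tf.keras.layers.LSTMCell(rnn_units)
output_dense = tf.keras.layers.Dense(vocab_size + 1)

def decode(labels=None, training=False):
    # initial LSTM state: [hidden, cell], each of shape (batch_size, rnn_units)
    states = [tf.zeros([batch_size, rnn_units]), tf.zeros([batch_size, rnn_units])]
    symbol = tf.fill([batch_size], vocab_size)  # virtual start symbol index
    logits_list = []
    for t in range(max_length):
        embedded = embed(tf.one_hot(symbol, depth=vocab_size + 1))
        hidden, states = decoder_cell(embedded, states)
        logits = output_dense(hidden)
        if training and labels is not None:
            symbol = labels[:, t]                # teacher forcing: feed the ground-truth char
        else:
            symbol = tf.argmax(logits, axis=-1)  # inference: feed back the model's own prediction
        logits_list.append(logits)
    return tf.stack(logits_list, axis=1)         # (batch_size, max_length, vocab_size + 1)

gt = tf.random.uniform([batch_size, max_length], maxval=vocab_size, dtype=tf.int32)
print(decode(gt, training=True).shape)           # (2, 5, 11)
```
At inference time the same loop runs with training=False, so the decoder consumes its own argmax predictions instead of the labels.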
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doctr/models/recognition/sar.py`
Content:
```
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 from copy import deepcopy
7 import tensorflow as tf
8 from tensorflow.keras import Sequential, layers
9 from typing import Tuple, Dict, List, Any, Optional
10
11 from .. import vgg
12 from ..utils import load_pretrained_params
13 from .core import RecognitionModel
14 from .core import RecognitionPostProcessor
15 from doctr.utils.repr import NestedObject
16
17 __all__ = ['SAR', 'SARPostProcessor', 'sar_vgg16_bn']
18
19 default_cfgs: Dict[str, Dict[str, Any]] = {
20 'sar_vgg16_bn': {
21 'backbone': 'vgg16_bn', 'rnn_units': 512, 'max_length': 40, 'num_decoders': 2,
22 'input_shape': (64, 256, 3),
23 'post_processor': 'SARPostProcessor',
24 'vocab': ('3K}7eé;5àÎYho]QwV6qU~W"XnbBvcADfËmy.9ÔpÛ*{CôïE%M4#ÈR:g@T$x?0î£|za1ù8,OG€P-'
25 'kçHëÀÂ2É/ûIJ\'j(LNÙFut[)èZs+&°Sd=Ï!<â_Ç>rêi`l'),
26 'url': None,
27 },
28 }
29
30
31 class AttentionModule(layers.Layer, NestedObject):
32 """Implements attention module of the SAR model
33
34 Args:
35 attention_units: number of hidden attention units
36
37 """
38 def __init__(
39 self,
40 attention_units: int
41 ) -> None:
42
43 super().__init__()
44 self.hidden_state_projector = layers.Conv2D(
45 attention_units, 1, strides=1, use_bias=False, padding='same', kernel_initializer='he_normal',
46 )
47 self.features_projector = layers.Conv2D(
48 attention_units, 3, strides=1, use_bias=True, padding='same', kernel_initializer='he_normal',
49 )
50 self.attention_projector = layers.Conv2D(
51 1, 1, strides=1, use_bias=False, padding="same", kernel_initializer='he_normal',
52 )
53 self.flatten = layers.Flatten()
54
55 def call(
56 self,
57 features: tf.Tensor,
58 hidden_state: tf.Tensor,
59 **kwargs: Any,
60 ) -> tf.Tensor:
61
62 [H, W] = features.get_shape().as_list()[1:3]
63 # shape (N, 1, 1, rnn_units) -> (N, 1, 1, attention_units)
64 hidden_state_projection = self.hidden_state_projector(hidden_state, **kwargs)
65 # shape (N, H, W, vgg_units) -> (N, H, W, attention_units)
66 features_projection = self.features_projector(features, **kwargs)
67 projection = tf.math.tanh(hidden_state_projection + features_projection)
68 # shape (N, H, W, attention_units) -> (N, H, W, 1)
69 attention = self.attention_projector(projection, **kwargs)
70 # shape (N, H, W, 1) -> (N, H * W)
71 attention = self.flatten(attention)
72 attention = tf.nn.softmax(attention)
73 # shape (N, H * W) -> (N, H, W, 1)
74 attention_map = tf.reshape(attention, [-1, H, W, 1])
75 glimpse = tf.math.multiply(features, attention_map)
76 # shape (N, H * W) -> (N, 1)
77 glimpse = tf.reduce_sum(glimpse, axis=[1, 2])
78 return glimpse
79
80
81 class SARDecoder(layers.Layer, NestedObject):
82 """Implements decoder module of the SAR model
83
84 Args:
85 rnn_units: number of hidden units in recurrent cells
86 max_length: maximum length of a sequence
87 vocab_size: number of classes in the model alphabet
88 embedding_units: number of hidden embedding units
89 attention_units: number of hidden attention units
90 num_decoder_layers: number of LSTM layers to stack
91
92
93 """
94 def __init__(
95 self,
96 rnn_units: int,
97 max_length: int,
98 vocab_size: int,
99 embedding_units: int,
100 attention_units: int,
101 num_decoder_layers: int = 2
102 ) -> None:
103
104 super().__init__()
105 self.vocab_size = vocab_size
106 self.embed = layers.Dense(embedding_units, use_bias=False)
107 self.attention_module = AttentionModule(attention_units)
108 self.output_dense = layers.Dense(vocab_size + 1, use_bias=True)
109 self.max_length = max_length
110 self.lstm_decoder = layers.StackedRNNCells(
111 [layers.LSTMCell(rnn_units, dtype=tf.float32, implementation=1) for _ in range(num_decoder_layers)]
112 )
113
114 def call(
115 self,
116 features: tf.Tensor,
117 holistic: tf.Tensor,
118 **kwargs: Any,
119 ) -> tf.Tensor:
120
121 # initialize states (each of shape (N, rnn_units))
122 states = self.lstm_decoder.get_initial_state(
123 inputs=None, batch_size=features.shape[0], dtype=tf.float32
124 )
125 # run first step of lstm
126 # holistic: shape (N, rnn_units)
127 _, states = self.lstm_decoder(holistic, states, **kwargs)
128 # Initialize with the index of virtual START symbol (placed after <eos>)
129 symbol = tf.fill(features.shape[0], self.vocab_size + 1)
130 logits_list = []
131 for _ in range(self.max_length + 1): # keep 1 step for <eos>
132 # one-hot symbol with depth vocab_size + 1
133 # embeded_symbol: shape (N, embedding_units)
134 embeded_symbol = self.embed(tf.one_hot(symbol, depth=self.vocab_size + 1), **kwargs)
135 logits, states = self.lstm_decoder(embeded_symbol, states, **kwargs)
136 glimpse = self.attention_module(
137 features, tf.expand_dims(tf.expand_dims(logits, axis=1), axis=1), **kwargs,
138 )
139 # logits: shape (N, rnn_units), glimpse: shape (N, 1)
140 logits = tf.concat([logits, glimpse], axis=-1)
141 # shape (N, rnn_units + 1) -> (N, vocab_size + 1)
142 logits = self.output_dense(logits, **kwargs)
143 # update symbol with predicted logits for t+1 step
144 symbol = tf.argmax(logits, axis=-1)
145 logits_list.append(logits)
146 outputs = tf.stack(logits_list, axis=1) # shape (N, max_length + 1, vocab_size + 1)
147
148 return outputs
149
150
151 class SAR(RecognitionModel):
152 """Implements a SAR architecture as described in `"Show, Attend and Read:A Simple and Strong Baseline for
153 Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
154
155 Args:
156 feature_extractor: the backbone serving as feature extractor
157 vocab_size: size of the alphabet
158 rnn_units: number of hidden units in both encoder and decoder LSTM
159 embedding_units: number of embedding units
160 attention_units: number of hidden units in attention module
161 max_length: maximum word length handled by the model
162 num_decoders: number of LSTM to stack in decoder layer
163
164 """
165
166 _children_names: List[str] = ['feat_extractor', 'encoder', 'decoder']
167
168 def __init__(
169 self,
170 feature_extractor,
171 vocab_size: int = 110,
172 rnn_units: int = 512,
173 embedding_units: int = 512,
174 attention_units: int = 512,
175 max_length: int = 30,
176 num_decoders: int = 2,
177 cfg: Optional[Dict[str, Any]] = None,
178 ) -> None:
179
180 super().__init__(cfg=cfg)
181
182 self.feat_extractor = feature_extractor
183
184 self.encoder = Sequential(
185 [
186 layers.LSTM(units=rnn_units, return_sequences=True),
187 layers.LSTM(units=rnn_units, return_sequences=False)
188 ]
189 )
190
191 self.decoder = SARDecoder(
192 rnn_units, max_length, vocab_size, embedding_units, attention_units, num_decoders,
193
194 )
195
196 def call(
197 self,
198 x: tf.Tensor,
199 **kwargs: Any,
200 ) -> tf.Tensor:
201
202 features = self.feat_extractor(x, **kwargs)
203 pooled_features = tf.reduce_max(features, axis=1) # vertical max pooling
204 encoded = self.encoder(pooled_features, **kwargs)
205 decoded = self.decoder(features, encoded, **kwargs)
206
207 return decoded
208
209
210 class SARPostProcessor(RecognitionPostProcessor):
211 """Post processor for SAR architectures
212
213 Args:
214 vocab: string containing the ordered sequence of supported characters
215 ignore_case: if True, ignore case of letters
216 ignore_accents: if True, ignore accents of letters
217 """
218
219 def __call__(
220 self,
221 logits: tf.Tensor,
222 ) -> List[str]:
223 # compute pred with argmax for attention models
224 pred = tf.math.argmax(logits, axis=2)
225
226 # decode raw output of the model with tf_label_to_idx
227 pred = tf.cast(pred, dtype='int32')
228 decoded_strings_pred = tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(self._embedding, pred), axis=-1)
229 decoded_strings_pred = tf.strings.split(decoded_strings_pred, "<eos>")
230 decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value='not valid')[:, 0]
231 words_list = [word.decode() for word in list(decoded_strings_pred.numpy())]
232
233 if self.ignore_case:
234 words_list = [word.lower() for word in words_list]
235
236 if self.ignore_accents:
237 raise NotImplementedError
238
239 return words_list
240
241
242 def _sar_vgg(arch: str, pretrained: bool, input_shape: Tuple[int, int, int] = None, **kwargs: Any) -> SAR:
243
244 # Patch the config
245 _cfg = deepcopy(default_cfgs[arch])
246 _cfg['input_shape'] = input_shape or _cfg['input_shape']
247 _cfg['vocab_size'] = kwargs.get('vocab_size', len(_cfg['vocab']))
248 _cfg['rnn_units'] = kwargs.get('rnn_units', _cfg['rnn_units'])
249 _cfg['embedding_units'] = kwargs.get('embedding_units', _cfg['rnn_units'])
250 _cfg['attention_units'] = kwargs.get('attention_units', _cfg['rnn_units'])
251 _cfg['max_length'] = kwargs.get('max_length', _cfg['max_length'])
252 _cfg['num_decoders'] = kwargs.get('num_decoders', _cfg['num_decoders'])
253
254 # Feature extractor
255 feat_extractor = vgg.__dict__[default_cfgs[arch]['backbone']](
256 input_shape=_cfg['input_shape'],
257 include_top=False,
258 )
259
260 kwargs['vocab_size'] = _cfg['vocab_size']
261 kwargs['rnn_units'] = _cfg['rnn_units']
262 kwargs['embedding_units'] = _cfg['embedding_units']
263 kwargs['attention_units'] = _cfg['attention_units']
264 kwargs['max_length'] = _cfg['max_length']
265 kwargs['num_decoders'] = _cfg['num_decoders']
266
267 # Build the model
268 model = SAR(feat_extractor, cfg=_cfg, **kwargs)
269 # Load pretrained parameters
270 if pretrained:
271 load_pretrained_params(model, default_cfgs[arch]['url'])
272
273 return model
274
275
276 def sar_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> SAR:
277 """SAR with a VGG16 feature extractor as described in `"Show, Attend and Read:A Simple and Strong
278 Baseline for Irregular Text Recognition" <https://arxiv.org/pdf/1811.00751.pdf>`_.
279
280 Example::
281 >>> import tensorflow as tf
282 >>> from doctr.models import sar_vgg16_bn
283 >>> model = sar_vgg16_bn(pretrained=False)
284 >>> input_tensor = tf.random.uniform(shape=[1, 64, 256, 3], maxval=1, dtype=tf.float32)
285 >>> out = model(input_tensor)
286
287 Args:
288 pretrained (bool): If True, returns a model pre-trained on ImageNet
289
290 Returns:
291 text recognition architecture
292 """
293
294 return _sar_vgg('sar_vgg16_bn', pretrained, **kwargs)
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doctr/models/recognition/sar.py b/doctr/models/recognition/sar.py
--- a/doctr/models/recognition/sar.py
+++ b/doctr/models/recognition/sar.py
@@ -89,7 +89,6 @@
attention_units: number of hidden attention units
num_decoder_layers: number of LSTM layers to stack
-
"""
def __init__(
self,
@@ -98,7 +97,7 @@
vocab_size: int,
embedding_units: int,
attention_units: int,
- num_decoder_layers: int = 2
+ num_decoder_layers: int = 2,
) -> None:
super().__init__()
@@ -115,6 +114,7 @@
self,
features: tf.Tensor,
holistic: tf.Tensor,
+ labels: Optional[tf.sparse.SparseTensor] = None,
**kwargs: Any,
) -> tf.Tensor:
@@ -128,7 +128,7 @@
# Initialize with the index of virtual START symbol (placed after <eos>)
symbol = tf.fill(features.shape[0], self.vocab_size + 1)
logits_list = []
- for _ in range(self.max_length + 1): # keep 1 step for <eos>
+ for t in range(self.max_length + 1): # keep 1 step for <eos>
# one-hot symbol with depth vocab_size + 1
# embeded_symbol: shape (N, embedding_units)
embeded_symbol = self.embed(tf.one_hot(symbol, depth=self.vocab_size + 1), **kwargs)
@@ -141,7 +141,13 @@
# shape (N, rnn_units + 1) -> (N, vocab_size + 1)
logits = self.output_dense(logits, **kwargs)
# update symbol with predicted logits for t+1 step
- symbol = tf.argmax(logits, axis=-1)
+ if kwargs.get('training'):
+ dense_labels = tf.sparse.to_dense(
+ labels, default_value=self.vocab_size
+ )
+ symbol = dense_labels[:, t]
+ else:
+ symbol = tf.argmax(logits, axis=-1)
logits_list.append(logits)
outputs = tf.stack(logits_list, axis=1) # shape (N, max_length + 1, vocab_size + 1)
@@ -196,13 +202,19 @@
def call(
self,
x: tf.Tensor,
+ labels: Optional[tf.sparse.SparseTensor] = None,
**kwargs: Any,
) -> tf.Tensor:
features = self.feat_extractor(x, **kwargs)
pooled_features = tf.reduce_max(features, axis=1) # vertical max pooling
encoded = self.encoder(pooled_features, **kwargs)
- decoded = self.decoder(features, encoded, **kwargs)
+ if kwargs.get('training'):
+ if labels is None:
+ raise ValueError('Need to provide labels during training for teacher forcing')
+ decoded = self.decoder(features, encoded, labels, **kwargs)
+ else:
+ decoded = self.decoder(features, encoded, **kwargs)
return decoded
| {"golden_diff": "diff --git a/doctr/models/recognition/sar.py b/doctr/models/recognition/sar.py\n--- a/doctr/models/recognition/sar.py\n+++ b/doctr/models/recognition/sar.py\n@@ -89,7 +89,6 @@\n attention_units: number of hidden attention units\n num_decoder_layers: number of LSTM layers to stack\n \n-\n \"\"\"\n def __init__(\n self,\n@@ -98,7 +97,7 @@\n vocab_size: int,\n embedding_units: int,\n attention_units: int,\n- num_decoder_layers: int = 2\n+ num_decoder_layers: int = 2,\n ) -> None:\n \n super().__init__()\n@@ -115,6 +114,7 @@\n self,\n features: tf.Tensor,\n holistic: tf.Tensor,\n+ labels: Optional[tf.sparse.SparseTensor] = None,\n **kwargs: Any,\n ) -> tf.Tensor:\n \n@@ -128,7 +128,7 @@\n # Initialize with the index of virtual START symbol (placed after <eos>)\n symbol = tf.fill(features.shape[0], self.vocab_size + 1)\n logits_list = []\n- for _ in range(self.max_length + 1): # keep 1 step for <eos>\n+ for t in range(self.max_length + 1): # keep 1 step for <eos>\n # one-hot symbol with depth vocab_size + 1\n # embeded_symbol: shape (N, embedding_units)\n embeded_symbol = self.embed(tf.one_hot(symbol, depth=self.vocab_size + 1), **kwargs)\n@@ -141,7 +141,13 @@\n # shape (N, rnn_units + 1) -> (N, vocab_size + 1)\n logits = self.output_dense(logits, **kwargs)\n # update symbol with predicted logits for t+1 step\n- symbol = tf.argmax(logits, axis=-1)\n+ if kwargs.get('training'):\n+ dense_labels = tf.sparse.to_dense(\n+ labels, default_value=self.vocab_size\n+ )\n+ symbol = dense_labels[:, t]\n+ else:\n+ symbol = tf.argmax(logits, axis=-1)\n logits_list.append(logits)\n outputs = tf.stack(logits_list, axis=1) # shape (N, max_length + 1, vocab_size + 1)\n \n@@ -196,13 +202,19 @@\n def call(\n self,\n x: tf.Tensor,\n+ labels: Optional[tf.sparse.SparseTensor] = None,\n **kwargs: Any,\n ) -> tf.Tensor:\n \n features = self.feat_extractor(x, **kwargs)\n pooled_features = tf.reduce_max(features, axis=1) # vertical max pooling\n encoded = self.encoder(pooled_features, **kwargs)\n- decoded = self.decoder(features, encoded, **kwargs)\n+ if kwargs.get('training'):\n+ if labels is None:\n+ raise ValueError('Need to provide labels during training for teacher forcing')\n+ decoded = self.decoder(features, encoded, labels, **kwargs)\n+ else:\n+ decoded = self.decoder(features, encoded, **kwargs)\n \n return decoded\n", "issue": "[Models] Add SAR teacher for\u00e7ing during training (decoder)\nTeacher for\u00e7ing (feeding the LSTM decoder of SAR with ground-truth characters during training) is not implemented, and it should really improve performances\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom copy import deepcopy\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential, layers\nfrom typing import Tuple, Dict, List, Any, Optional\n\nfrom .. 
import vgg\nfrom ..utils import load_pretrained_params\nfrom .core import RecognitionModel\nfrom .core import RecognitionPostProcessor\nfrom doctr.utils.repr import NestedObject\n\n__all__ = ['SAR', 'SARPostProcessor', 'sar_vgg16_bn']\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'sar_vgg16_bn': {\n 'backbone': 'vgg16_bn', 'rnn_units': 512, 'max_length': 40, 'num_decoders': 2,\n 'input_shape': (64, 256, 3),\n 'post_processor': 'SARPostProcessor',\n 'vocab': ('3K}7e\u00e9;5\u00e0\u00ceYho]QwV6qU~W\"XnbBvcADf\u00cbmy.9\u00d4p\u00db*{C\u00f4\u00efE%M4#\u00c8R:g@T$x?0\u00ee\u00a3|za1\u00f98,OG\u20acP-'\n 'k\u00e7H\u00eb\u00c0\u00c22\u00c9/\u00fbIJ\\'j(LN\u00d9Fut[)\u00e8Zs+&\u00b0Sd=\u00cf!<\u00e2_\u00c7>r\u00eai`l'),\n 'url': None,\n },\n}\n\n\nclass AttentionModule(layers.Layer, NestedObject):\n \"\"\"Implements attention module of the SAR model\n\n Args:\n attention_units: number of hidden attention units\n\n \"\"\"\n def __init__(\n self,\n attention_units: int\n ) -> None:\n\n super().__init__()\n self.hidden_state_projector = layers.Conv2D(\n attention_units, 1, strides=1, use_bias=False, padding='same', kernel_initializer='he_normal',\n )\n self.features_projector = layers.Conv2D(\n attention_units, 3, strides=1, use_bias=True, padding='same', kernel_initializer='he_normal',\n )\n self.attention_projector = layers.Conv2D(\n 1, 1, strides=1, use_bias=False, padding=\"same\", kernel_initializer='he_normal',\n )\n self.flatten = layers.Flatten()\n\n def call(\n self,\n features: tf.Tensor,\n hidden_state: tf.Tensor,\n **kwargs: Any,\n ) -> tf.Tensor:\n\n [H, W] = features.get_shape().as_list()[1:3]\n # shape (N, 1, 1, rnn_units) -> (N, 1, 1, attention_units)\n hidden_state_projection = self.hidden_state_projector(hidden_state, **kwargs)\n # shape (N, H, W, vgg_units) -> (N, H, W, attention_units)\n features_projection = self.features_projector(features, **kwargs)\n projection = tf.math.tanh(hidden_state_projection + features_projection)\n # shape (N, H, W, attention_units) -> (N, H, W, 1)\n attention = self.attention_projector(projection, **kwargs)\n # shape (N, H, W, 1) -> (N, H * W)\n attention = self.flatten(attention)\n attention = tf.nn.softmax(attention)\n # shape (N, H * W) -> (N, H, W, 1)\n attention_map = tf.reshape(attention, [-1, H, W, 1])\n glimpse = tf.math.multiply(features, attention_map)\n # shape (N, H * W) -> (N, 1)\n glimpse = tf.reduce_sum(glimpse, axis=[1, 2])\n return glimpse\n\n\nclass SARDecoder(layers.Layer, NestedObject):\n \"\"\"Implements decoder module of the SAR model\n\n Args:\n rnn_units: number of hidden units in recurrent cells\n max_length: maximum length of a sequence\n vocab_size: number of classes in the model alphabet\n embedding_units: number of hidden embedding units\n attention_units: number of hidden attention units\n num_decoder_layers: number of LSTM layers to stack\n\n\n \"\"\"\n def __init__(\n self,\n rnn_units: int,\n max_length: int,\n vocab_size: int,\n embedding_units: int,\n attention_units: int,\n num_decoder_layers: int = 2\n ) -> None:\n\n super().__init__()\n self.vocab_size = vocab_size\n self.embed = layers.Dense(embedding_units, use_bias=False)\n self.attention_module = AttentionModule(attention_units)\n self.output_dense = layers.Dense(vocab_size + 1, use_bias=True)\n self.max_length = max_length\n self.lstm_decoder = layers.StackedRNNCells(\n [layers.LSTMCell(rnn_units, dtype=tf.float32, implementation=1) for _ in range(num_decoder_layers)]\n )\n\n def call(\n self,\n features: tf.Tensor,\n holistic: tf.Tensor,\n **kwargs: 
Any,\n ) -> tf.Tensor:\n\n # initialize states (each of shape (N, rnn_units))\n states = self.lstm_decoder.get_initial_state(\n inputs=None, batch_size=features.shape[0], dtype=tf.float32\n )\n # run first step of lstm\n # holistic: shape (N, rnn_units)\n _, states = self.lstm_decoder(holistic, states, **kwargs)\n # Initialize with the index of virtual START symbol (placed after <eos>)\n symbol = tf.fill(features.shape[0], self.vocab_size + 1)\n logits_list = []\n for _ in range(self.max_length + 1): # keep 1 step for <eos>\n # one-hot symbol with depth vocab_size + 1\n # embeded_symbol: shape (N, embedding_units)\n embeded_symbol = self.embed(tf.one_hot(symbol, depth=self.vocab_size + 1), **kwargs)\n logits, states = self.lstm_decoder(embeded_symbol, states, **kwargs)\n glimpse = self.attention_module(\n features, tf.expand_dims(tf.expand_dims(logits, axis=1), axis=1), **kwargs,\n )\n # logits: shape (N, rnn_units), glimpse: shape (N, 1)\n logits = tf.concat([logits, glimpse], axis=-1)\n # shape (N, rnn_units + 1) -> (N, vocab_size + 1)\n logits = self.output_dense(logits, **kwargs)\n # update symbol with predicted logits for t+1 step\n symbol = tf.argmax(logits, axis=-1)\n logits_list.append(logits)\n outputs = tf.stack(logits_list, axis=1) # shape (N, max_length + 1, vocab_size + 1)\n\n return outputs\n\n\nclass SAR(RecognitionModel):\n \"\"\"Implements a SAR architecture as described in `\"Show, Attend and Read:A Simple and Strong Baseline for\n Irregular Text Recognition\" <https://arxiv.org/pdf/1811.00751.pdf>`_.\n\n Args:\n feature_extractor: the backbone serving as feature extractor\n vocab_size: size of the alphabet\n rnn_units: number of hidden units in both encoder and decoder LSTM\n embedding_units: number of embedding units\n attention_units: number of hidden units in attention module\n max_length: maximum word length handled by the model\n num_decoders: number of LSTM to stack in decoder layer\n\n \"\"\"\n\n _children_names: List[str] = ['feat_extractor', 'encoder', 'decoder']\n\n def __init__(\n self,\n feature_extractor,\n vocab_size: int = 110,\n rnn_units: int = 512,\n embedding_units: int = 512,\n attention_units: int = 512,\n max_length: int = 30,\n num_decoders: int = 2,\n cfg: Optional[Dict[str, Any]] = None,\n ) -> None:\n\n super().__init__(cfg=cfg)\n\n self.feat_extractor = feature_extractor\n\n self.encoder = Sequential(\n [\n layers.LSTM(units=rnn_units, return_sequences=True),\n layers.LSTM(units=rnn_units, return_sequences=False)\n ]\n )\n\n self.decoder = SARDecoder(\n rnn_units, max_length, vocab_size, embedding_units, attention_units, num_decoders,\n\n )\n\n def call(\n self,\n x: tf.Tensor,\n **kwargs: Any,\n ) -> tf.Tensor:\n\n features = self.feat_extractor(x, **kwargs)\n pooled_features = tf.reduce_max(features, axis=1) # vertical max pooling\n encoded = self.encoder(pooled_features, **kwargs)\n decoded = self.decoder(features, encoded, **kwargs)\n\n return decoded\n\n\nclass SARPostProcessor(RecognitionPostProcessor):\n \"\"\"Post processor for SAR architectures\n\n Args:\n vocab: string containing the ordered sequence of supported characters\n ignore_case: if True, ignore case of letters\n ignore_accents: if True, ignore accents of letters\n \"\"\"\n\n def __call__(\n self,\n logits: tf.Tensor,\n ) -> List[str]:\n # compute pred with argmax for attention models\n pred = tf.math.argmax(logits, axis=2)\n\n # decode raw output of the model with tf_label_to_idx\n pred = tf.cast(pred, dtype='int32')\n decoded_strings_pred = 
tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(self._embedding, pred), axis=-1)\n decoded_strings_pred = tf.strings.split(decoded_strings_pred, \"<eos>\")\n decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value='not valid')[:, 0]\n words_list = [word.decode() for word in list(decoded_strings_pred.numpy())]\n\n if self.ignore_case:\n words_list = [word.lower() for word in words_list]\n\n if self.ignore_accents:\n raise NotImplementedError\n\n return words_list\n\n\ndef _sar_vgg(arch: str, pretrained: bool, input_shape: Tuple[int, int, int] = None, **kwargs: Any) -> SAR:\n\n # Patch the config\n _cfg = deepcopy(default_cfgs[arch])\n _cfg['input_shape'] = input_shape or _cfg['input_shape']\n _cfg['vocab_size'] = kwargs.get('vocab_size', len(_cfg['vocab']))\n _cfg['rnn_units'] = kwargs.get('rnn_units', _cfg['rnn_units'])\n _cfg['embedding_units'] = kwargs.get('embedding_units', _cfg['rnn_units'])\n _cfg['attention_units'] = kwargs.get('attention_units', _cfg['rnn_units'])\n _cfg['max_length'] = kwargs.get('max_length', _cfg['max_length'])\n _cfg['num_decoders'] = kwargs.get('num_decoders', _cfg['num_decoders'])\n\n # Feature extractor\n feat_extractor = vgg.__dict__[default_cfgs[arch]['backbone']](\n input_shape=_cfg['input_shape'],\n include_top=False,\n )\n\n kwargs['vocab_size'] = _cfg['vocab_size']\n kwargs['rnn_units'] = _cfg['rnn_units']\n kwargs['embedding_units'] = _cfg['embedding_units']\n kwargs['attention_units'] = _cfg['attention_units']\n kwargs['max_length'] = _cfg['max_length']\n kwargs['num_decoders'] = _cfg['num_decoders']\n\n # Build the model\n model = SAR(feat_extractor, cfg=_cfg, **kwargs)\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, default_cfgs[arch]['url'])\n\n return model\n\n\ndef sar_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> SAR:\n \"\"\"SAR with a VGG16 feature extractor as described in `\"Show, Attend and Read:A Simple and Strong\n Baseline for Irregular Text Recognition\" <https://arxiv.org/pdf/1811.00751.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import sar_vgg16_bn\n >>> model = sar_vgg16_bn(pretrained=False)\n >>> input_tensor = tf.random.uniform(shape=[1, 64, 256, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _sar_vgg('sar_vgg16_bn', pretrained, **kwargs)\n", "path": "doctr/models/recognition/sar.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom copy import deepcopy\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential, layers\nfrom typing import Tuple, Dict, List, Any, Optional\n\nfrom .. 
import vgg\nfrom ..utils import load_pretrained_params\nfrom .core import RecognitionModel\nfrom .core import RecognitionPostProcessor\nfrom doctr.utils.repr import NestedObject\n\n__all__ = ['SAR', 'SARPostProcessor', 'sar_vgg16_bn']\n\ndefault_cfgs: Dict[str, Dict[str, Any]] = {\n 'sar_vgg16_bn': {\n 'backbone': 'vgg16_bn', 'rnn_units': 512, 'max_length': 40, 'num_decoders': 2,\n 'input_shape': (64, 256, 3),\n 'post_processor': 'SARPostProcessor',\n 'vocab': ('3K}7e\u00e9;5\u00e0\u00ceYho]QwV6qU~W\"XnbBvcADf\u00cbmy.9\u00d4p\u00db*{C\u00f4\u00efE%M4#\u00c8R:g@T$x?0\u00ee\u00a3|za1\u00f98,OG\u20acP-'\n 'k\u00e7H\u00eb\u00c0\u00c22\u00c9/\u00fbIJ\\'j(LN\u00d9Fut[)\u00e8Zs+&\u00b0Sd=\u00cf!<\u00e2_\u00c7>r\u00eai`l'),\n 'url': None,\n },\n}\n\n\nclass AttentionModule(layers.Layer, NestedObject):\n \"\"\"Implements attention module of the SAR model\n\n Args:\n attention_units: number of hidden attention units\n\n \"\"\"\n def __init__(\n self,\n attention_units: int\n ) -> None:\n\n super().__init__()\n self.hidden_state_projector = layers.Conv2D(\n attention_units, 1, strides=1, use_bias=False, padding='same', kernel_initializer='he_normal',\n )\n self.features_projector = layers.Conv2D(\n attention_units, 3, strides=1, use_bias=True, padding='same', kernel_initializer='he_normal',\n )\n self.attention_projector = layers.Conv2D(\n 1, 1, strides=1, use_bias=False, padding=\"same\", kernel_initializer='he_normal',\n )\n self.flatten = layers.Flatten()\n\n def call(\n self,\n features: tf.Tensor,\n hidden_state: tf.Tensor,\n **kwargs: Any,\n ) -> tf.Tensor:\n\n [H, W] = features.get_shape().as_list()[1:3]\n # shape (N, 1, 1, rnn_units) -> (N, 1, 1, attention_units)\n hidden_state_projection = self.hidden_state_projector(hidden_state, **kwargs)\n # shape (N, H, W, vgg_units) -> (N, H, W, attention_units)\n features_projection = self.features_projector(features, **kwargs)\n projection = tf.math.tanh(hidden_state_projection + features_projection)\n # shape (N, H, W, attention_units) -> (N, H, W, 1)\n attention = self.attention_projector(projection, **kwargs)\n # shape (N, H, W, 1) -> (N, H * W)\n attention = self.flatten(attention)\n attention = tf.nn.softmax(attention)\n # shape (N, H * W) -> (N, H, W, 1)\n attention_map = tf.reshape(attention, [-1, H, W, 1])\n glimpse = tf.math.multiply(features, attention_map)\n # shape (N, H * W) -> (N, 1)\n glimpse = tf.reduce_sum(glimpse, axis=[1, 2])\n return glimpse\n\n\nclass SARDecoder(layers.Layer, NestedObject):\n \"\"\"Implements decoder module of the SAR model\n\n Args:\n rnn_units: number of hidden units in recurrent cells\n max_length: maximum length of a sequence\n vocab_size: number of classes in the model alphabet\n embedding_units: number of hidden embedding units\n attention_units: number of hidden attention units\n num_decoder_layers: number of LSTM layers to stack\n\n \"\"\"\n def __init__(\n self,\n rnn_units: int,\n max_length: int,\n vocab_size: int,\n embedding_units: int,\n attention_units: int,\n num_decoder_layers: int = 2,\n ) -> None:\n\n super().__init__()\n self.vocab_size = vocab_size\n self.embed = layers.Dense(embedding_units, use_bias=False)\n self.attention_module = AttentionModule(attention_units)\n self.output_dense = layers.Dense(vocab_size + 1, use_bias=True)\n self.max_length = max_length\n self.lstm_decoder = layers.StackedRNNCells(\n [layers.LSTMCell(rnn_units, dtype=tf.float32, implementation=1) for _ in range(num_decoder_layers)]\n )\n\n def call(\n self,\n features: tf.Tensor,\n holistic: tf.Tensor,\n labels: 
Optional[tf.sparse.SparseTensor] = None,\n **kwargs: Any,\n ) -> tf.Tensor:\n\n # initialize states (each of shape (N, rnn_units))\n states = self.lstm_decoder.get_initial_state(\n inputs=None, batch_size=features.shape[0], dtype=tf.float32\n )\n # run first step of lstm\n # holistic: shape (N, rnn_units)\n _, states = self.lstm_decoder(holistic, states, **kwargs)\n # Initialize with the index of virtual START symbol (placed after <eos>)\n symbol = tf.fill(features.shape[0], self.vocab_size + 1)\n logits_list = []\n for t in range(self.max_length + 1): # keep 1 step for <eos>\n # one-hot symbol with depth vocab_size + 1\n # embeded_symbol: shape (N, embedding_units)\n embeded_symbol = self.embed(tf.one_hot(symbol, depth=self.vocab_size + 1), **kwargs)\n logits, states = self.lstm_decoder(embeded_symbol, states, **kwargs)\n glimpse = self.attention_module(\n features, tf.expand_dims(tf.expand_dims(logits, axis=1), axis=1), **kwargs,\n )\n # logits: shape (N, rnn_units), glimpse: shape (N, 1)\n logits = tf.concat([logits, glimpse], axis=-1)\n # shape (N, rnn_units + 1) -> (N, vocab_size + 1)\n logits = self.output_dense(logits, **kwargs)\n # update symbol with predicted logits for t+1 step\n if kwargs.get('training'):\n dense_labels = tf.sparse.to_dense(\n labels, default_value=self.vocab_size\n )\n symbol = dense_labels[:, t]\n else:\n symbol = tf.argmax(logits, axis=-1)\n logits_list.append(logits)\n outputs = tf.stack(logits_list, axis=1) # shape (N, max_length + 1, vocab_size + 1)\n\n return outputs\n\n\nclass SAR(RecognitionModel):\n \"\"\"Implements a SAR architecture as described in `\"Show, Attend and Read:A Simple and Strong Baseline for\n Irregular Text Recognition\" <https://arxiv.org/pdf/1811.00751.pdf>`_.\n\n Args:\n feature_extractor: the backbone serving as feature extractor\n vocab_size: size of the alphabet\n rnn_units: number of hidden units in both encoder and decoder LSTM\n embedding_units: number of embedding units\n attention_units: number of hidden units in attention module\n max_length: maximum word length handled by the model\n num_decoders: number of LSTM to stack in decoder layer\n\n \"\"\"\n\n _children_names: List[str] = ['feat_extractor', 'encoder', 'decoder']\n\n def __init__(\n self,\n feature_extractor,\n vocab_size: int = 110,\n rnn_units: int = 512,\n embedding_units: int = 512,\n attention_units: int = 512,\n max_length: int = 30,\n num_decoders: int = 2,\n cfg: Optional[Dict[str, Any]] = None,\n ) -> None:\n\n super().__init__(cfg=cfg)\n\n self.feat_extractor = feature_extractor\n\n self.encoder = Sequential(\n [\n layers.LSTM(units=rnn_units, return_sequences=True),\n layers.LSTM(units=rnn_units, return_sequences=False)\n ]\n )\n\n self.decoder = SARDecoder(\n rnn_units, max_length, vocab_size, embedding_units, attention_units, num_decoders,\n\n )\n\n def call(\n self,\n x: tf.Tensor,\n labels: Optional[tf.sparse.SparseTensor] = None,\n **kwargs: Any,\n ) -> tf.Tensor:\n\n features = self.feat_extractor(x, **kwargs)\n pooled_features = tf.reduce_max(features, axis=1) # vertical max pooling\n encoded = self.encoder(pooled_features, **kwargs)\n if kwargs.get('training'):\n if labels is None:\n raise ValueError('Need to provide labels during training for teacher forcing')\n decoded = self.decoder(features, encoded, labels, **kwargs)\n else:\n decoded = self.decoder(features, encoded, **kwargs)\n\n return decoded\n\n\nclass SARPostProcessor(RecognitionPostProcessor):\n \"\"\"Post processor for SAR architectures\n\n Args:\n vocab: string containing the 
ordered sequence of supported characters\n ignore_case: if True, ignore case of letters\n ignore_accents: if True, ignore accents of letters\n \"\"\"\n\n def __call__(\n self,\n logits: tf.Tensor,\n ) -> List[str]:\n # compute pred with argmax for attention models\n pred = tf.math.argmax(logits, axis=2)\n\n # decode raw output of the model with tf_label_to_idx\n pred = tf.cast(pred, dtype='int32')\n decoded_strings_pred = tf.strings.reduce_join(inputs=tf.nn.embedding_lookup(self._embedding, pred), axis=-1)\n decoded_strings_pred = tf.strings.split(decoded_strings_pred, \"<eos>\")\n decoded_strings_pred = tf.sparse.to_dense(decoded_strings_pred.to_sparse(), default_value='not valid')[:, 0]\n words_list = [word.decode() for word in list(decoded_strings_pred.numpy())]\n\n if self.ignore_case:\n words_list = [word.lower() for word in words_list]\n\n if self.ignore_accents:\n raise NotImplementedError\n\n return words_list\n\n\ndef _sar_vgg(arch: str, pretrained: bool, input_shape: Tuple[int, int, int] = None, **kwargs: Any) -> SAR:\n\n # Patch the config\n _cfg = deepcopy(default_cfgs[arch])\n _cfg['input_shape'] = input_shape or _cfg['input_shape']\n _cfg['vocab_size'] = kwargs.get('vocab_size', len(_cfg['vocab']))\n _cfg['rnn_units'] = kwargs.get('rnn_units', _cfg['rnn_units'])\n _cfg['embedding_units'] = kwargs.get('embedding_units', _cfg['rnn_units'])\n _cfg['attention_units'] = kwargs.get('attention_units', _cfg['rnn_units'])\n _cfg['max_length'] = kwargs.get('max_length', _cfg['max_length'])\n _cfg['num_decoders'] = kwargs.get('num_decoders', _cfg['num_decoders'])\n\n # Feature extractor\n feat_extractor = vgg.__dict__[default_cfgs[arch]['backbone']](\n input_shape=_cfg['input_shape'],\n include_top=False,\n )\n\n kwargs['vocab_size'] = _cfg['vocab_size']\n kwargs['rnn_units'] = _cfg['rnn_units']\n kwargs['embedding_units'] = _cfg['embedding_units']\n kwargs['attention_units'] = _cfg['attention_units']\n kwargs['max_length'] = _cfg['max_length']\n kwargs['num_decoders'] = _cfg['num_decoders']\n\n # Build the model\n model = SAR(feat_extractor, cfg=_cfg, **kwargs)\n # Load pretrained parameters\n if pretrained:\n load_pretrained_params(model, default_cfgs[arch]['url'])\n\n return model\n\n\ndef sar_vgg16_bn(pretrained: bool = False, **kwargs: Any) -> SAR:\n \"\"\"SAR with a VGG16 feature extractor as described in `\"Show, Attend and Read:A Simple and Strong\n Baseline for Irregular Text Recognition\" <https://arxiv.org/pdf/1811.00751.pdf>`_.\n\n Example::\n >>> import tensorflow as tf\n >>> from doctr.models import sar_vgg16_bn\n >>> model = sar_vgg16_bn(pretrained=False)\n >>> input_tensor = tf.random.uniform(shape=[1, 64, 256, 3], maxval=1, dtype=tf.float32)\n >>> out = model(input_tensor)\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n\n Returns:\n text recognition architecture\n \"\"\"\n\n return _sar_vgg('sar_vgg16_bn', pretrained, **kwargs)\n", "path": "doctr/models/recognition/sar.py"}]} | 3,895 | 702 |
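The `SARPostProcessor` stored in this record turns per-step argmax indices into strings with `tf.strings.reduce_join` and then trims everything after `<eos>`. A small self-contained sketch of that trimming step, with a made-up character table and fake predictions in place of real decoder output, and `tf.gather` standing in for the embedding lookup:

```python
import tensorflow as tf

# Made-up character table and per-step argmax indices; in the real model these
# come from tf.math.argmax over the decoder logits.
vocab = tf.constant(["a", "b", "c", "<eos>"])
pred = tf.constant([[0, 1, 2, 3, 0],
                    [2, 2, 3, 0, 1]])

# Join predicted characters into one string per sample, then cut at "<eos>".
joined = tf.strings.reduce_join(tf.gather(vocab, pred), axis=-1)
trimmed = tf.strings.split(joined, "<eos>")
decoded = tf.sparse.to_dense(trimmed.to_sparse(), default_value="")[:, 0]
print([w.decode() for w in decoded.numpy()])  # ['abc', 'cc']
```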
gh_patches_debug_3475 | rasdani/github-patches | git_diff | ckan__ckan-7033 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IConfigurer plugin load order
**CKAN version**
(all)
**Describe the bug**
`update_config` runs through all IConfigurer plugins from first to last calling `plugin.update_config`. The pattern for other interfaces is that the "first plugin wins", but this is difficult to implement when later plugins override values from earlier ones in the list.
**Steps to reproduce**
Enable two plugins that set the same config value using IConfigurer
**Expected behavior**
First plugin value should win, like with other interfaces.
**Additional details**
ckanext-envvars recommends adding `envvars` last in the list of plugins, which makes sense but if other plugins depend on/override values configured in envvars (e.g. ckanext-scheming) they won't be available at `update_config` time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/environment.py`
Content:
```
1 # encoding: utf-8
2 '''CKAN environment configuration'''
3 from __future__ import annotations
4
5 import os
6 import logging
7 import warnings
8 import pytz
9
10 from typing import Union, cast
11
12 import sqlalchemy
13
14 import ckan.model as model
15 import ckan.plugins as p
16 import ckan.lib.plugins as lib_plugins
17 import ckan.lib.helpers as helpers
18 import ckan.lib.app_globals as app_globals
19 from ckan.lib.redis import is_redis_available
20 import ckan.lib.search as search
21 import ckan.logic as logic
22 import ckan.authz as authz
23 from ckan.lib.webassets_tools import webassets_init
24 from ckan.lib.i18n import build_js_translations
25
26 from ckan.common import CKANConfig, config, config_declaration
27 from ckan.exceptions import CkanConfigurationException
28 from ckan.types import Config
29
30 log = logging.getLogger(__name__)
31
32 # Suppress benign warning 'Unbuilt egg for setuptools'
33 warnings.simplefilter('ignore', UserWarning)
34
35
36 def load_environment(conf: Union[Config, CKANConfig]):
37 """
38 Configure the Pylons environment via the ``pylons.config`` object. This
39 code should only need to be run once.
40 """
41 os.environ['CKAN_CONFIG'] = cast(str, conf['__file__'])
42
43 valid_base_public_folder_names = ['public', 'public-bs3']
44 static_files = conf.get('ckan.base_public_folder', 'public')
45 conf['ckan.base_public_folder'] = static_files
46
47 if static_files not in valid_base_public_folder_names:
48 raise CkanConfigurationException(
49 'You provided an invalid value for ckan.base_public_folder. '
50 'Possible values are: "public" and "public-bs3".'
51 )
52
53 log.info('Loading static files from %s' % static_files)
54
55 # Initialize main CKAN config object
56 config.update(conf)
57
58 # Setup the SQLAlchemy database engine
59 # Suppress a couple of sqlalchemy warnings
60 msgs = ['^Unicode type received non-unicode bind param value',
61 "^Did not recognize type 'BIGINT' of column 'size'",
62 "^Did not recognize type 'tsvector' of column 'search_vector'"
63 ]
64 for msg in msgs:
65 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
66
67 # load all CKAN plugins
68 p.load_all()
69
70 # Check Redis availability
71 if not is_redis_available():
72 log.critical('Could not connect to Redis.')
73
74 app_globals.reset()
75
76 # Build JavaScript translations. Must be done after plugins have
77 # been loaded.
78 build_js_translations()
79
80
81 # A mapping of config settings that can be overridden by env vars.
82 # Note: Do not remove the following lines, they are used in the docs
83 # Start CONFIG_FROM_ENV_VARS
84 CONFIG_FROM_ENV_VARS: dict[str, str] = {
85 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
86 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
87 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
88 'ckan.redis.url': 'CKAN_REDIS_URL',
89 'solr_url': 'CKAN_SOLR_URL',
90 'solr_user': 'CKAN_SOLR_USER',
91 'solr_password': 'CKAN_SOLR_PASSWORD',
92 'ckan.site_id': 'CKAN_SITE_ID',
93 'ckan.site_url': 'CKAN_SITE_URL',
94 'ckan.storage_path': 'CKAN_STORAGE_PATH',
95 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
96 'smtp.server': 'CKAN_SMTP_SERVER',
97 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
98 'smtp.user': 'CKAN_SMTP_USER',
99 'smtp.password': 'CKAN_SMTP_PASSWORD',
100 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',
101 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'
102 }
103 # End CONFIG_FROM_ENV_VARS
104
105
106 def update_config() -> None:
107 ''' This code needs to be run when the config is changed to take those
108 changes into account. It is called whenever a plugin is loaded as the
109 plugin might have changed the config values (for instance it might
110 change ckan.site_url) '''
111
112 config_declaration.setup()
113 config_declaration.make_safe(config)
114 config_declaration.normalize(config)
115
116 webassets_init()
117
118 for plugin in p.PluginImplementations(p.IConfigurer):
119 # must do update in place as this does not work:
120 # config = plugin.update_config(config)
121 plugin.update_config(config)
122
123 for option in CONFIG_FROM_ENV_VARS:
124 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
125 if from_env:
126 config[option] = from_env
127
128 if config.get_value("config.mode") == "strict":
129 _, errors = config_declaration.validate(config)
130 if errors:
131 msg = "\n".join(
132 "{}: {}".format(key, "; ".join(issues))
133 for key, issues in errors.items()
134 )
135 raise CkanConfigurationException(msg)
136
137 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
138
139 site_url = config.get_value('ckan.site_url')
140 if not site_url:
141 raise RuntimeError(
142 'ckan.site_url is not configured and it must have a value.'
143 ' Please amend your .ini file.')
144 if not site_url.lower().startswith('http'):
145 raise RuntimeError(
146 'ckan.site_url should be a full URL, including the schema '
147 '(http or https)')
148 # Remove backslash from site_url if present
149 config['ckan.site_url'] = site_url.rstrip('/')
150
151 display_timezone = config.get_value('ckan.display_timezone')
152 if (display_timezone and
153 display_timezone != 'server' and
154 display_timezone not in pytz.all_timezones):
155 raise CkanConfigurationException(
156 "ckan.display_timezone is not 'server' or a valid timezone"
157 )
158
159 # Init SOLR settings and check if the schema is compatible
160 # from ckan.lib.search import SolrSettings, check_solr_schema_version
161
162 # lib.search is imported here as we need the config enabled and parsed
163 search.SolrSettings.init(config.get_value('solr_url'),
164 config.get_value('solr_user'),
165 config.get_value('solr_password'))
166 search.check_solr_schema_version()
167
168 lib_plugins.reset_package_plugins()
169 lib_plugins.register_package_plugins()
170 lib_plugins.reset_group_plugins()
171 lib_plugins.register_group_plugins()
172
173 # initialise the globals
174 app_globals.app_globals._init()
175
176 helpers.load_plugin_helpers()
177
178 # Templates and CSS loading from configuration
179 valid_base_templates_folder_names = ['templates', 'templates-bs3']
180 templates = config.get('ckan.base_templates_folder', 'templates')
181 config['ckan.base_templates_folder'] = templates
182
183 if templates not in valid_base_templates_folder_names:
184 raise CkanConfigurationException(
185 'You provided an invalid value for ckan.base_templates_folder. '
186 'Possible values are: "templates" and "templates-bs3".'
187 )
188
189 jinja2_templates_path = os.path.join(root, templates)
190 log.info('Loading templates from %s' % jinja2_templates_path)
191 template_paths = [jinja2_templates_path]
192
193 extra_template_paths = config.get_value('extra_template_paths')
194 if extra_template_paths:
195 # must be first for them to override defaults
196 template_paths = extra_template_paths.split(',') + template_paths
197 config['computed_template_paths'] = template_paths
198
199 # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
200 # to eliminate database errors due to stale pooled connections
201 config.setdefault('sqlalchemy.pool_pre_ping', True)
202 # Initialize SQLAlchemy
203 engine = sqlalchemy.engine_from_config(config)
204 model.init_model(engine)
205
206 for plugin in p.PluginImplementations(p.IConfigurable):
207 plugin.configure(config)
208
209 # clear other caches
210 logic.clear_actions_cache()
211 logic.clear_validators_cache()
212 authz.clear_auth_functions_cache()
213
214 # Here we create the site user if they are not already in the database
215 try:
216 logic.get_action('get_site_user')({'ignore_auth': True}, {})
217 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
218 # The database is not yet initialised. It happens in `ckan db init`
219 pass
220 except sqlalchemy.exc.IntegrityError:
221 # Race condition, user already exists.
222 pass
223
224 # Close current session and open database connections to ensure a clean
225 # clean environment even if an error occurs later on
226 model.Session.remove()
227 model.Session.bind.dispose()
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -115,7 +115,7 @@
webassets_init()
- for plugin in p.PluginImplementations(p.IConfigurer):
+ for plugin in reversed(list(p.PluginImplementations(p.IConfigurer))):
# must do update in place as this does not work:
# config = plugin.update_config(config)
plugin.update_config(config)
| {"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -115,7 +115,7 @@\n \n webassets_init()\n \n- for plugin in p.PluginImplementations(p.IConfigurer):\n+ for plugin in reversed(list(p.PluginImplementations(p.IConfigurer))):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n", "issue": "IConfigurer plugin load order\n**CKAN version**\r\n(all)\r\n\r\n**Describe the bug**\r\n`update_config` runs through all IConfigurer plugins from first to last calling `plugin.update_config`. The pattern for other interfaces is that the \"first plugin wins\", but this is difficult to implement when later plugins override values from earlier ones in the list.\r\n\r\n**Steps to reproduce**\r\nEnable two plugins that set the same config value using IConfigurer\r\n\r\n**Expected behavior**\r\nFirst plugin value should win, like with other interfaces.\r\n\r\n**Additional details**\r\nckanext-envvars recommends adding `envvars` last in the list of plugins, which makes sense but if other plugins depend on/override values configured in envvars (e.g. ckanext-scheming) they won't be available at `update_config` time.\n", "before_files": [{"content": "# encoding: utf-8\n'''CKAN environment configuration'''\nfrom __future__ import annotations\n\nimport os\nimport logging\nimport warnings\nimport pytz\n\nfrom typing import Union, cast\n\nimport sqlalchemy\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import CKANConfig, config, config_declaration\nfrom ckan.exceptions import CkanConfigurationException\nfrom ckan.types import Config\n\nlog = logging.getLogger(__name__)\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf: Union[Config, CKANConfig]):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n os.environ['CKAN_CONFIG'] = cast(str, conf['__file__'])\n\n valid_base_public_folder_names = ['public', 'public-bs3']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. 
'\n 'Possible values are: \"public\" and \"public-bs3\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n\n # Initialize main CKAN config object\n config.update(conf)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS: dict[str, str] = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config() -> None:\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n config_declaration.setup()\n config_declaration.make_safe(config)\n config_declaration.normalize(config)\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n if config.get_value(\"config.mode\") == \"strict\":\n _, errors = config_declaration.validate(config)\n if errors:\n msg = \"\\n\".join(\n \"{}: {}\".format(key, \"; \".join(issues))\n for key, issues in errors.items()\n )\n raise CkanConfigurationException(msg)\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get_value('ckan.site_url')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n # Remove backslash from site_url if present\n config['ckan.site_url'] = site_url.rstrip('/')\n\n display_timezone = config.get_value('ckan.display_timezone')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get_value('solr_url'),\n config.get_value('solr_user'),\n config.get_value('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs3']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs3\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get_value('extra_template_paths')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, {})\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n except sqlalchemy.exc.IntegrityError:\n # Race condition, user already exists.\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}], "after_files": [{"content": "# encoding: utf-8\n'''CKAN environment configuration'''\nfrom __future__ import annotations\n\nimport os\nimport logging\nimport warnings\nimport pytz\n\nfrom typing import Union, cast\n\nimport sqlalchemy\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import CKANConfig, config, config_declaration\nfrom ckan.exceptions import CkanConfigurationException\nfrom ckan.types import Config\n\nlog = logging.getLogger(__name__)\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf: Union[Config, CKANConfig]):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n os.environ['CKAN_CONFIG'] = cast(str, conf['__file__'])\n\n valid_base_public_folder_names = ['public', 'public-bs3']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. 
'\n 'Possible values are: \"public\" and \"public-bs3\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n\n # Initialize main CKAN config object\n config.update(conf)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS: dict[str, str] = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config() -> None:\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n config_declaration.setup()\n config_declaration.make_safe(config)\n config_declaration.normalize(config)\n\n webassets_init()\n\n for plugin in reversed(list(p.PluginImplementations(p.IConfigurer))):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n if config.get_value(\"config.mode\") == \"strict\":\n _, errors = config_declaration.validate(config)\n if errors:\n msg = \"\\n\".join(\n \"{}: {}\".format(key, \"; \".join(issues))\n for key, issues in errors.items()\n )\n raise CkanConfigurationException(msg)\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get_value('ckan.site_url')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n # Remove backslash from site_url if present\n config['ckan.site_url'] = site_url.rstrip('/')\n\n display_timezone = config.get_value('ckan.display_timezone')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get_value('solr_url'),\n config.get_value('solr_user'),\n config.get_value('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs3']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs3\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get_value('extra_template_paths')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, {})\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n except sqlalchemy.exc.IntegrityError:\n # Race condition, user already exists.\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]} | 2,865 | 113 |
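The golden diff above swaps the IConfigurer loop for a reversed iteration so that the first plugin listed in the configuration gets the last word. A minimal sketch of that effect, using toy stand-ins rather than CKAN's real plugin machinery; the option name and values below are only illustrative:

```python
# Toy stand-ins for CKAN's plugin machinery. Each update_config() overwrites
# shared keys in place, so iterating in reverse lets the first-listed plugin
# write last and therefore win.
class TinyPlugin:
    def __init__(self, name, settings):
        self.name = name
        self.settings = settings

    def update_config(self, config):
        config.update(self.settings)


plugins = [  # order as it would appear in the ckan.plugins setting
    TinyPlugin("scheming", {"example.option": "from-scheming"}),
    TinyPlugin("envvars", {"example.option": "from-envvars"}),
]

config = {}
for plugin in reversed(plugins):  # mirrors the patched loop
    plugin.update_config(config)

print(config["example.option"])  # from-scheming: the first-listed plugin wins
```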
gh_patches_debug_12067 | rasdani/github-patches | git_diff | sktime__sktime-1453 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] sktime.datatypes._panel._examples raises pandas.core.common.SettingWithCopyError
**Describe the bug**
Attempting to install [tsai](https://pypi.org/project/tsai/) as an upstream package also installs this package, but the install raises an error that traces to line 67 in "/opt/conda/lib/python3.8/site-packages/sktime/datatypes/_panel/_examples.py"
```
X.iloc[0][0] = pd.Series([1, 2, 3])
```
**To Reproduce**
Importing any code that executes the code starting at line 67 of /sktime/datatypes/_panel/_examples.py can raise a Pandas error, depending on Pandas version that may be installed
**Expected behavior**
No error should be raised on install or import of sktime as a dependency.
**Additional context**
<!--
Add any other context about the problem here.
-->
**Versions**
System:
python: 3.9.1 (default, Sep 16 2021, 11:42:30) [Clang 12.0.5 (clang-1205.0.22.11)]
executable: /.../.pyenv/versions/3.9.1/bin/python
machine: macOS-11.6-x86_64-i386-64bit
Python dependencies:
pip: 21.2.4
setuptools: 49.2.1
sklearn: 1.0
sktime: 0.8.0
statsmodels: 0.12.2
numpy: 1.20.3
scipy: 1.7.1
Cython: None
pandas: 1.3.3
matplotlib: 3.4.3
joblib: 1.0.1
numba: 0.53.1
pmdarima: None
tsfresh: 0.18.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/datatypes/_panel/_examples.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Example generation for testing.
3
4 Exports dict of examples, useful for testing as fixtures.
5
6 example_dict: dict indexed by triple
7 1st element = mtype - str
8 2nd element = considered as this scitype - str
9 3rd element = int - index of example
10 elements are data objects, considered examples for the mtype
11 all examples with same index are considered "same" on scitype content
12 if None, indicates that representation is not possible
13
14 example_lossy: dict of bool indexed by pairs of str
15 1st element = mtype - str
16 2nd element = considered as this scitype - str
17 3rd element = int - index of example
18 elements are bool, indicate whether representation has information removed
19 all examples with same index are considered "same" on scitype content
20
21 overall, conversions from non-lossy representations to any other ones
22 should yield the element exactly, identidally (given same index)
23 """
24
25 import pandas as pd
26 import numpy as np
27
28 example_dict = dict()
29 example_dict_lossy = dict()
30
31 ###
32
33
34 X = np.array(
35 [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
36 dtype=np.int64,
37 )
38
39 example_dict[("numpy3D", "Panel", 0)] = X
40 example_dict_lossy[("numpy3D", "Panel", 0)] = False
41
42 cols = [f"var_{i}" for i in range(2)]
43 Xlist = [
44 pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
45 pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
46 pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
47 ]
48
49 example_dict[("df-list", "Panel", 0)] = Xlist
50 example_dict_lossy[("df-list", "Panel", 0)] = False
51
52 cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]
53
54 Xlist = [
55 pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
56 pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
57 pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
58 ]
59 X = pd.concat(Xlist)
60 X = X.set_index(["instances", "timepoints"])
61
62 example_dict[("pd-multiindex", "Panel", 0)] = X
63 example_dict_lossy[("pd-multiindex", "Panel", 0)] = False
64
65 cols = [f"var_{i}" for i in range(2)]
66 X = pd.DataFrame(columns=cols, index=[0, 1, 2])
67 X.iloc[0][0] = pd.Series([1, 2, 3])
68 X.iloc[0][1] = pd.Series([4, 5, 6])
69 X.iloc[1][0] = pd.Series([1, 2, 3])
70 X.iloc[1][1] = pd.Series([4, 55, 6])
71 X.iloc[2][0] = pd.Series([1, 2, 3])
72 X.iloc[2][1] = pd.Series([42, 5, 6])
73
74 example_dict[("nested_univ", "Panel", 0)] = X
75 example_dict_lossy[("nested_univ", "Panel", 0)] = False
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/datatypes/_panel/_examples.py b/sktime/datatypes/_panel/_examples.py
--- a/sktime/datatypes/_panel/_examples.py
+++ b/sktime/datatypes/_panel/_examples.py
@@ -64,12 +64,13 @@
cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
-X.iloc[0][0] = pd.Series([1, 2, 3])
-X.iloc[0][1] = pd.Series([4, 5, 6])
-X.iloc[1][0] = pd.Series([1, 2, 3])
-X.iloc[1][1] = pd.Series([4, 55, 6])
-X.iloc[2][0] = pd.Series([1, 2, 3])
-X.iloc[2][1] = pd.Series([42, 5, 6])
+X["var_0"] = pd.Series(
+ [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
+)
+
+X["var_1"] = pd.Series(
+ [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
+)
example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
| {"golden_diff": "diff --git a/sktime/datatypes/_panel/_examples.py b/sktime/datatypes/_panel/_examples.py\n--- a/sktime/datatypes/_panel/_examples.py\n+++ b/sktime/datatypes/_panel/_examples.py\n@@ -64,12 +64,13 @@\n \n cols = [f\"var_{i}\" for i in range(2)]\n X = pd.DataFrame(columns=cols, index=[0, 1, 2])\n-X.iloc[0][0] = pd.Series([1, 2, 3])\n-X.iloc[0][1] = pd.Series([4, 5, 6])\n-X.iloc[1][0] = pd.Series([1, 2, 3])\n-X.iloc[1][1] = pd.Series([4, 55, 6])\n-X.iloc[2][0] = pd.Series([1, 2, 3])\n-X.iloc[2][1] = pd.Series([42, 5, 6])\n+X[\"var_0\"] = pd.Series(\n+ [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]\n+)\n+\n+X[\"var_1\"] = pd.Series(\n+ [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]\n+)\n \n example_dict[(\"nested_univ\", \"Panel\", 0)] = X\n example_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "issue": "[BUG] sktime.datatypes._panel._examples raises pandas.core.common.SettingWithCopyError\n**Describe the bug**\r\nAttempting to install [tsai](https://pypi.org/project/tsai/) as an upstream package also installs this package, but the install raises an error that traces to line 67 in \"/opt/conda/lib/python3.8/site-packages/sktime/datatypes/_panel/_examples.py\"\r\n```\r\n X.iloc[0][0] = pd.Series([1, 2, 3])\r\n```\r\n\r\n**To Reproduce**\r\nImporting any code that executes the code starting at line 67 of /sktime/datatypes/_panel/_examples.py can raise a Pandas error, depending on Pandas version that may be installed\r\n\r\n**Expected behavior**\r\nNo error should be raised on install or import of sktime as a dependency. \r\n\r\n**Additional context**\r\n<!--\r\nAdd any other context about the problem here.\r\n-->\r\n\r\n**Versions**\r\nSystem:\r\n python: 3.9.1 (default, Sep 16 2021, 11:42:30) [Clang 12.0.5 (clang-1205.0.22.11)]\r\nexecutable: /.../.pyenv/versions/3.9.1/bin/python\r\n machine: macOS-11.6-x86_64-i386-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2.4\r\n setuptools: 49.2.1\r\n sklearn: 1.0\r\n sktime: 0.8.0\r\n statsmodels: 0.12.2\r\n numpy: 1.20.3\r\n scipy: 1.7.1\r\n Cython: None\r\n pandas: 1.3.3\r\n matplotlib: 3.4.3\r\n joblib: 1.0.1\r\n numba: 0.53.1\r\n pmdarima: None\r\n tsfresh: 0.18.0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by pairs of str\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nexample_dict = dict()\nexample_dict_lossy = dict()\n\n###\n\n\nX = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],\n dtype=np.int64,\n)\n\nexample_dict[(\"numpy3D\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"numpy3D\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in 
range(2)]\nXlist = [\n pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),\n pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),\n pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),\n]\n\nexample_dict[(\"df-list\", \"Panel\", 0)] = Xlist\nexample_dict_lossy[(\"df-list\", \"Panel\", 0)] = False\n\ncols = [\"instances\", \"timepoints\"] + [f\"var_{i}\" for i in range(2)]\n\nXlist = [\n pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),\n pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),\n pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),\n]\nX = pd.concat(Xlist)\nX = X.set_index([\"instances\", \"timepoints\"])\n\nexample_dict[(\"pd-multiindex\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"pd-multiindex\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nX = pd.DataFrame(columns=cols, index=[0, 1, 2])\nX.iloc[0][0] = pd.Series([1, 2, 3])\nX.iloc[0][1] = pd.Series([4, 5, 6])\nX.iloc[1][0] = pd.Series([1, 2, 3])\nX.iloc[1][1] = pd.Series([4, 55, 6])\nX.iloc[2][0] = pd.Series([1, 2, 3])\nX.iloc[2][1] = pd.Series([42, 5, 6])\n\nexample_dict[(\"nested_univ\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "path": "sktime/datatypes/_panel/_examples.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Example generation for testing.\n\nExports dict of examples, useful for testing as fixtures.\n\nexample_dict: dict indexed by triple\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are data objects, considered examples for the mtype\n all examples with same index are considered \"same\" on scitype content\n if None, indicates that representation is not possible\n\nexample_lossy: dict of bool indexed by pairs of str\n 1st element = mtype - str\n 2nd element = considered as this scitype - str\n 3rd element = int - index of example\nelements are bool, indicate whether representation has information removed\n all examples with same index are considered \"same\" on scitype content\n\noverall, conversions from non-lossy representations to any other ones\n should yield the element exactly, identidally (given same index)\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nexample_dict = dict()\nexample_dict_lossy = dict()\n\n###\n\n\nX = np.array(\n [[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],\n dtype=np.int64,\n)\n\nexample_dict[(\"numpy3D\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"numpy3D\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nXlist = [\n pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),\n pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),\n pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),\n]\n\nexample_dict[(\"df-list\", \"Panel\", 0)] = Xlist\nexample_dict_lossy[(\"df-list\", \"Panel\", 0)] = False\n\ncols = [\"instances\", \"timepoints\"] + [f\"var_{i}\" for i in range(2)]\n\nXlist = [\n pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),\n pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),\n pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),\n]\nX = pd.concat(Xlist)\nX = X.set_index([\"instances\", \"timepoints\"])\n\nexample_dict[(\"pd-multiindex\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"pd-multiindex\", \"Panel\", 0)] = False\n\ncols = [f\"var_{i}\" for i in range(2)]\nX = pd.DataFrame(columns=cols, index=[0, 1, 2])\nX[\"var_0\"] = 
pd.Series(\n [pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]\n)\n\nX[\"var_1\"] = pd.Series(\n [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]\n)\n\nexample_dict[(\"nested_univ\", \"Panel\", 0)] = X\nexample_dict_lossy[(\"nested_univ\", \"Panel\", 0)] = False\n", "path": "sktime/datatypes/_panel/_examples.py"}]} | 1,755 | 355 |
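The patch above replaces cell-by-cell chained assignment with whole-column assignment when building the nested-Series example frame. A small standalone sketch of the change, with values taken from the example itself:

```python
import pandas as pd

cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])

# Chained indexing such as X.iloc[0][0] = ... writes through an intermediate
# object, which trips pandas' chained-assignment check (a warning by default,
# SettingWithCopyError when pd.options.mode.chained_assignment == "raise").
# Assigning whole columns of nested Series sidesteps the chained lookup:
X["var_0"] = pd.Series([pd.Series([1, 2, 3]) for _ in range(3)])
X["var_1"] = pd.Series(
    [pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
)

print(X.loc[0, "var_0"].tolist())  # [1, 2, 3]
```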
gh_patches_debug_319 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
better identification of Solaar versions
`git describe` produces
0.9.2-339-g39791be
Instead it should produce something based on 1.0.1
`git describe --tags` produces
1.0.1-58-g39791be
which is much better.
I think that all that is required is to upgrade the 1.0.1 tag that already exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/__init__.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22 __version__ = '1.0.1'
23 NAME = 'Solaar'
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py
--- a/lib/solaar/__init__.py
+++ b/lib/solaar/__init__.py
@@ -19,5 +19,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
-__version__ = '1.0.1'
+__version__ = '1.0.2-rc1'
NAME = 'Solaar'
| {"golden_diff": "diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py\n--- a/lib/solaar/__init__.py\n+++ b/lib/solaar/__init__.py\n@@ -19,5 +19,5 @@\n \n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-__version__ = '1.0.1'\n+__version__ = '1.0.2-rc1'\n NAME = 'Solaar'\n", "issue": "better identification of Solaar versions\n`git describe` produces\r\n0.9.2-339-g39791be\r\nInstead it should produce something based on 1.0.1\r\n`git describe --tags` produces\r\n1.0.1-58-g39791be\r\nwhich is much better.\r\n\r\nI think that all that is required is to upgrade the 1.0.1 tag that already exists.\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.1'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__version__ = '1.0.2-rc1'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}]} | 632 | 107 |
gh_patches_debug_9857 | rasdani/github-patches | git_diff | saulpw__visidata-2160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[parquet] can't load parquet directory anymore: `IsADirectoryError`
**Small description**
Hi @saulpw @anjakefala @takacsd - it seems that forcing opening the path as file with `.open()` - introduced with #2133 - breaks the use case where the multiple parquet files are stored in a directory, and this directory is then read by visidata. This is common with Hive partitioning or when working with spark. A simple fix would be to check if the path is a directory with `os.path.is_dir()` and then retaining old behavior of passing it as a string to `read_table()`. If it is not an existing directory, we move to the new way of opening as a binary buffer.
I have already added this workaround to my clone of visidata, and it fixes my issue, but maybe you have some better ideas how to handle it instead of `if-else` statement in the `ParquetSheet`.
**Expected result**
```bash
vd -f parquet parquet_dir
```
should load a parquet into visidata
**Actual result with screenshot**

**Additional context**
```bash
# freshest develop
visidata@9fd728b72c115e50e99c24b455caaf020381b48e
pyarrow==12.0.0
python 3.10.2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/parquet.py`
Content:
```
1 from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd
2 from collections import defaultdict
3
4
5 @VisiData.api
6 def open_parquet(vd, p):
7 return ParquetSheet(p.name, source=p)
8
9
10 class ParquetColumn(Column):
11 def calcValue(self, row):
12 val = self.source[row["__rownum__"]]
13 if val.type == 'large_string':
14 return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')
15 else:
16 return val.as_py()
17
18
19 class ParquetSheet(Sheet):
20 # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}
21 def iterload(self):
22 pa = vd.importExternal("pyarrow", "pyarrow")
23 pq = vd.importExternal("pyarrow.parquet", "pyarrow")
24 from visidata.loaders.arrow import arrow_to_vdtype
25
26 with self.source.open('rb') as f:
27 self.tbl = pq.read_table(f)
28
29 self.columns = []
30 for colname, col in zip(self.tbl.column_names, self.tbl.columns):
31 c = ParquetColumn(colname,
32 type=arrow_to_vdtype(col.type),
33 source=col,
34 cache=(col.type.id == pa.lib.Type_LARGE_STRING))
35 self.addColumn(c)
36
37 for i in range(self.tbl.num_rows):
38 yield dict(__rownum__=i)
39
40
41 @VisiData.api
42 def save_parquet(vd, p, sheet):
43 pa = vd.importExternal("pyarrow")
44 pq = vd.importExternal("pyarrow.parquet", "pyarrow")
45
46 typemap = {
47 anytype: pa.string(),
48 int: pa.int64(),
49 vlen: pa.int64(),
50 float: pa.float64(),
51 str: pa.string(),
52 date: pa.date64(),
53 # list: pa.array(),
54 }
55
56 for t in vd.numericTypes:
57 if t not in typemap:
58 typemap[t] = pa.float64()
59
60 databycol = defaultdict(list) # col -> [values]
61
62 for typedvals in sheet.iterdispvals(format=False):
63 for col, val in typedvals.items():
64 if isinstance(val, TypedWrapper):
65 val = None
66
67 databycol[col].append(val)
68
69 data = [
70 pa.array(vals, type=typemap.get(col.type, pa.string()))
71 for col, vals in databycol.items()
72 ]
73
74 schema = pa.schema(
75 [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]
76 )
77 with p.open_bytes(mode="w") as outf:
78 with pq.ParquetWriter(outf, schema) as writer:
79 writer.write_batch(
80 pa.record_batch(data, names=[c.name for c in sheet.visibleCols])
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/parquet.py b/visidata/loaders/parquet.py
--- a/visidata/loaders/parquet.py
+++ b/visidata/loaders/parquet.py
@@ -23,8 +23,11 @@
pq = vd.importExternal("pyarrow.parquet", "pyarrow")
from visidata.loaders.arrow import arrow_to_vdtype
- with self.source.open('rb') as f:
- self.tbl = pq.read_table(f)
+ if self.source.is_dir():
+ self.tbl = pq.read_table(str(self.source))
+ else:
+ with self.source.open('rb') as f:
+ self.tbl = pq.read_table(f)
self.columns = []
for colname, col in zip(self.tbl.column_names, self.tbl.columns):
| {"golden_diff": "diff --git a/visidata/loaders/parquet.py b/visidata/loaders/parquet.py\n--- a/visidata/loaders/parquet.py\n+++ b/visidata/loaders/parquet.py\n@@ -23,8 +23,11 @@\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n \n- with self.source.open('rb') as f:\n- self.tbl = pq.read_table(f)\n+ if self.source.is_dir():\n+ self.tbl = pq.read_table(str(self.source))\n+ else: \n+ with self.source.open('rb') as f:\n+ self.tbl = pq.read_table(f)\n \n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n", "issue": "[parquet] can't load parquet directory anymore: `IsADirectoryError`\n**Small description**\r\n\r\nHi @saulpw @anjakefala @takacsd - it seems that forcing opening the path as file with `.open()` - introduced with #2133 - breaks the use case where the multiple parquet files are stored in a directory, and this directory is then read by visidata. This is common with Hive partitioning or when working with spark. A simple fix would be to check if the path is a directory with `os.path.is_dir()` and then retaining old behavior of passing it as a string to `read_table()`. If it is not an existing directory, we move to the new way of opening as a binary buffer.\r\n\r\nI have already added this workaround to my clone of visidata, and it fixes my issue, but maybe you have some better ideas how to handle it instead of `if-else` statement in the `ParquetSheet`.\r\n\r\n**Expected result**\r\n\r\n```bash\r\nvd -f parquet parquet_dir\r\n```\r\nshould load a parquet into visidata\r\n\r\n**Actual result with screenshot**\r\n\r\n\r\n**Additional context**\r\n\r\n```bash\r\n# freshest develop\r\nvisidata@9fd728b72c115e50e99c24b455caaf020381b48e\r\n\r\npyarrow==12.0.0\r\npython 3.10.2\r\n```\r\n\n", "before_files": [{"content": "from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\nfrom collections import defaultdict\n\n\[email protected]\ndef open_parquet(vd, p):\n return ParquetSheet(p.name, source=p)\n\n\nclass ParquetColumn(Column):\n def calcValue(self, row):\n val = self.source[row[\"__rownum__\"]]\n if val.type == 'large_string':\n return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')\n else:\n return val.as_py()\n\n\nclass ParquetSheet(Sheet):\n # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}\n def iterload(self):\n pa = vd.importExternal(\"pyarrow\", \"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n\n with self.source.open('rb') as f:\n self.tbl = pq.read_table(f)\n\n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n c = ParquetColumn(colname,\n type=arrow_to_vdtype(col.type),\n source=col,\n cache=(col.type.id == pa.lib.Type_LARGE_STRING))\n self.addColumn(c)\n\n for i in range(self.tbl.num_rows):\n yield dict(__rownum__=i)\n\n\[email protected]\ndef save_parquet(vd, p, sheet):\n pa = vd.importExternal(\"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n # list: pa.array(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, TypedWrapper):\n val = None\n\n 
databycol[col].append(val)\n\n data = [\n pa.array(vals, type=typemap.get(col.type, pa.string()))\n for col, vals in databycol.items()\n ]\n\n schema = pa.schema(\n [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]\n )\n with p.open_bytes(mode=\"w\") as outf:\n with pq.ParquetWriter(outf, schema) as writer:\n writer.write_batch(\n pa.record_batch(data, names=[c.name for c in sheet.visibleCols])\n )\n", "path": "visidata/loaders/parquet.py"}], "after_files": [{"content": "from visidata import Sheet, VisiData, TypedWrapper, anytype, date, vlen, Column, vd\nfrom collections import defaultdict\n\n\[email protected]\ndef open_parquet(vd, p):\n return ParquetSheet(p.name, source=p)\n\n\nclass ParquetColumn(Column):\n def calcValue(self, row):\n val = self.source[row[\"__rownum__\"]]\n if val.type == 'large_string':\n return memoryview(val.as_buffer())[:2**20].tobytes().decode('utf-8')\n else:\n return val.as_py()\n\n\nclass ParquetSheet(Sheet):\n # rowdef: {'__rownum__':int, parquet_col:overridden_value, ...}\n def iterload(self):\n pa = vd.importExternal(\"pyarrow\", \"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n from visidata.loaders.arrow import arrow_to_vdtype\n\n if self.source.is_dir():\n self.tbl = pq.read_table(str(self.source))\n else: \n with self.source.open('rb') as f:\n self.tbl = pq.read_table(f)\n\n self.columns = []\n for colname, col in zip(self.tbl.column_names, self.tbl.columns):\n c = ParquetColumn(colname,\n type=arrow_to_vdtype(col.type),\n source=col,\n cache=(col.type.id == pa.lib.Type_LARGE_STRING))\n self.addColumn(c)\n\n for i in range(self.tbl.num_rows):\n yield dict(__rownum__=i)\n\n\[email protected]\ndef save_parquet(vd, p, sheet):\n pa = vd.importExternal(\"pyarrow\")\n pq = vd.importExternal(\"pyarrow.parquet\", \"pyarrow\")\n\n typemap = {\n anytype: pa.string(),\n int: pa.int64(),\n vlen: pa.int64(),\n float: pa.float64(),\n str: pa.string(),\n date: pa.date64(),\n # list: pa.array(),\n }\n\n for t in vd.numericTypes:\n if t not in typemap:\n typemap[t] = pa.float64()\n\n databycol = defaultdict(list) # col -> [values]\n\n for typedvals in sheet.iterdispvals(format=False):\n for col, val in typedvals.items():\n if isinstance(val, TypedWrapper):\n val = None\n\n databycol[col].append(val)\n\n data = [\n pa.array(vals, type=typemap.get(col.type, pa.string()))\n for col, vals in databycol.items()\n ]\n\n schema = pa.schema(\n [(c.name, typemap.get(c.type, pa.string())) for c in sheet.visibleCols]\n )\n with p.open_bytes(mode=\"w\") as outf:\n with pq.ParquetWriter(outf, schema) as writer:\n writer.write_batch(\n pa.record_batch(data, names=[c.name for c in sheet.visibleCols])\n )\n", "path": "visidata/loaders/parquet.py"}]} | 1,415 | 182 |
gh_patches_debug_33730 | rasdani/github-patches | git_diff | sonic-net__sonic-mgmt-1253 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docker shell module failed in the latest sonic-mgmt
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/Azure/SONiC/wiki#report-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support can be found at the following locations:
- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
The task with shell type docker always fails on the latest sonic-mgmt, for example:
```
- name: Gather information from lldp
lldp:
vars:
ansible_shell_type: docker
ansible_python_interpreter: docker exec -i lldp python
```
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1. run dip_sip or lag_2 CT
2.
3.
**Describe the results you received:**
```
TASK [test : Gathering peer VM information from lldp] **************************
task path: /var/user/jenkins/bfn-sonic-mgmt/ansible/roles/test/tasks/lag_2.yml:26
Thursday 28 November 2019 10:28:47 +0000 (0:00:00.263) 0:00:26.753 *****
The full traceback is:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py", line 145, in run
res = self._execute()
File "/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py", line 612, in _execute
self._set_connection_options(variables, templar)
File "/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py", line 1012, in _set_connection_options
self._set_plugin_options('shell', final_vars, templar, task_keys)
File "/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py", line 978, in _set_plugin_options
plugin.set_options(task_keys=task_keys, var_options=options)
File "/usr/local/lib/python2.7/dist-packages/ansible/plugins/shell/__init__.py", line 70, in set_options
env = self.get_option('environment')
File "/usr/local/lib/python2.7/dist-packages/ansible/plugins/__init__.py", line 60, in get_option
raise KeyError(to_native(e))
KeyError: 'Requested entry (plugin_type: shell plugin: docker setting: environment ) was not defined in configuration.'
fatal: [cab18-2-dut]: FAILED! => {
"msg": "Unexpected failure during module execution.",
"stdout": ""
}
```
**Describe the results you expected:**
**Additional information you deem important:**
<!--
software/ASIC/Hardware Platform version and info
-->
**Output of `show version`:**
```
SONiC Software Version: SONiC.HEAD.488-dirty-20191127.082217
Distribution: Debian 9.11
Kernel: 4.9.0-9-2-amd64
Build commit: 7622a30d
Build date: Wed Nov 27 11:15:51 UTC 2019
Built by: johnar@jenkins-worker-11
```
**Attach debug file `sudo generate_dump`:**
```
(paste your output here)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ansible/shell_plugins/docker.py`
Content:
```
1 from __future__ import (absolute_import, division)
2 __metaclass__ = type
3
4 import os
5 import re
6 import pipes
7 import ansible.constants as C
8 import time
9 import random
10 import shlex
11 import getopt
12 from ansible.module_utils.six import text_type
13 from ansible.plugins.shell.sh import ShellModule as sh
14 from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
15
16 class ShellModule(sh):
17
18 def __init__(self, *args, **kwargs):
19 super(ShellModule, self).__init__(*args, **kwargs)
20 self.dtemps = []
21
22 def join_path(self, *args):
23 ## HACK! HACK! HACK!
24 ## We observe the interactions between ShellModule and ActionModule, and
25 ## find the temporary directories Ansible created on remote machine. So we
26 ## collect them and copied to docker container in build_module_command
27 if len(args) >= 2 and (args[0].startswith('/home/') or args[0].startswith('/root/')) and args[1] == '':
28 self.dtemps.append(args[0])
29
30 return super(ShellModule, self).join_path(*args)
31
32 def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
33 # assert(self.container_name)
34 argv = shlex.split(shebang.replace("#!", ""))
35 assert(argv[0] == 'docker')
36 assert(argv[1] == 'exec')
37 opts, args = getopt.getopt(argv[2:], 'i')
38 self.container_name = args[0]
39
40 # Inject environment variable before python in the shebang string
41 assert(args[1].endswith('python'))
42 args[1] = 'env {0} {1}'.format(env_string, args[1])
43 argv_env = argv[0:2] + [o for opt in opts for o in opt] + args
44 shebang_env = ' '.join(argv_env)
45
46 ## Note: Docker cp behavior
47 ## DEST_PATH exists and is a directory
48 ## SRC_PATH does end with /.
49 ## the content of the source directory is copied into this directory
50 ## Ref: https://docs.docker.com/engine/reference/commandline/cp/
51 pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '
52 .format(dtemp, self.container_name) for dtemp in self.dtemps)
53
54 if rm_tmp:
55 post = ''.join('docker exec {1} rm -rf {0}; '
56 .format(dtemp, self.container_name) for dtemp in self.dtemps)
57 else:
58 post = ''
59
60 return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post
61
62 def checksum(self, path, python_interp):
63 """
64 Return the command to calculate the checksum for the file in ansible controlled machine
65 Arguments:
66 path:
67 the file path
68 python_interp:
69 the path for the python interpreter
70 Example:
71 path:
72 /zebra.conf
73 python_interp:
74 docker exec -i debian python
75 cmd:
76 rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x"$rc" != "xflag" ] && echo "${rc} "/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)
77 returns:
78 docker exec -i debian sh -c "rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\"\$rc\" != \"xflag\" ] && echo \"\${rc} \"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)"
79 """
80 ## Super class implements this function by sh commands and python scripts
81 ## If python_interp is modified to 'docker CONTAINER python', it will only influence the python
82 ## script part in super class. Instead we should influence both
83 simple_interp = 'python'
84 assert(python_interp.startswith('docker exec '))
85 assert(python_interp.endswith(' ' + simple_interp))
86
87 docker_prefix = re.sub(simple_interp, '', python_interp)
88 cmd = super(ShellModule, self).checksum(path, simple_interp)
89 ## Escape the cmd:
90 ## " --> \"
91 cmd_escaped = cmd.replace('"', '\\"')
92 ## $ --> \$
93 cmd_escaped = cmd_escaped.replace('$', '\\$')
94 return '%s sh -c "%s"' % (docker_prefix, cmd_escaped)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ansible/shell_plugins/docker.py b/ansible/shell_plugins/docker.py
--- a/ansible/shell_plugins/docker.py
+++ b/ansible/shell_plugins/docker.py
@@ -1,6 +1,17 @@
from __future__ import (absolute_import, division)
__metaclass__ = type
+DOCUMENTATION = '''
+name: docker
+plugin_type: shell
+short_description: "docker shell plugin"
+version_added: historical
+description:
+ - This module allows you to execute commands directly in docker on the remote host
+extends_documentation_fragment:
+ - shell_common
+'''
+
import os
import re
import pipes
@@ -29,7 +40,24 @@
return super(ShellModule, self).join_path(*args)
- def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
+ def remove(self, path, recurse=False):
+ argv = self.get_option('ansible_python_interpreter').split()
+ assert(argv[0] == 'docker')
+ assert(argv[1] == 'exec')
+ opts, args = getopt.getopt(argv[2:], 'i')
+ self.container_name = args[0]
+
+ remove_files_on_host_cmd = super(ShellModule, self).remove(path, recurse)
+
+ cmd = remove_files_on_host_cmd + "; docker exec -i "
+ cmd += self.container_name + " rm -f "
+ if recurse:
+ cmd += '-r '
+ cmd += " ".join(self.dtemps)
+
+ return cmd
+
+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):
# assert(self.container_name)
argv = shlex.split(shebang.replace("#!", ""))
assert(argv[0] == 'docker')
@@ -51,13 +79,7 @@
pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '
.format(dtemp, self.container_name) for dtemp in self.dtemps)
- if rm_tmp:
- post = ''.join('docker exec {1} rm -rf {0}; '
- .format(dtemp, self.container_name) for dtemp in self.dtemps)
- else:
- post = ''
-
- return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post
+ return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path)
def checksum(self, path, python_interp):
"""
| {"golden_diff": "diff --git a/ansible/shell_plugins/docker.py b/ansible/shell_plugins/docker.py\n--- a/ansible/shell_plugins/docker.py\n+++ b/ansible/shell_plugins/docker.py\n@@ -1,6 +1,17 @@\n from __future__ import (absolute_import, division)\n __metaclass__ = type\n \n+DOCUMENTATION = '''\n+name: docker\n+plugin_type: shell\n+short_description: \"docker shell plugin\"\n+version_added: historical\n+description:\n+ - This module allows you to execute commands directly in docker on the remote host\n+extends_documentation_fragment:\n+ - shell_common\n+'''\n+\n import os\n import re\n import pipes\n@@ -29,7 +40,24 @@\n \n return super(ShellModule, self).join_path(*args)\n \n- def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):\n+ def remove(self, path, recurse=False):\n+ argv = self.get_option('ansible_python_interpreter').split()\n+ assert(argv[0] == 'docker')\n+ assert(argv[1] == 'exec')\n+ opts, args = getopt.getopt(argv[2:], 'i')\n+ self.container_name = args[0]\n+\n+ remove_files_on_host_cmd = super(ShellModule, self).remove(path, recurse)\n+\n+ cmd = remove_files_on_host_cmd + \"; docker exec -i \"\n+ cmd += self.container_name + \" rm -f \"\n+ if recurse:\n+ cmd += '-r '\n+ cmd += \" \".join(self.dtemps)\n+\n+ return cmd\n+\n+ def build_module_command(self, env_string, shebang, cmd, arg_path=None):\n # assert(self.container_name)\n argv = shlex.split(shebang.replace(\"#!\", \"\"))\n assert(argv[0] == 'docker')\n@@ -51,13 +79,7 @@\n pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '\n .format(dtemp, self.container_name) for dtemp in self.dtemps)\n \n- if rm_tmp:\n- post = ''.join('docker exec {1} rm -rf {0}; '\n- .format(dtemp, self.container_name) for dtemp in self.dtemps)\n- else:\n- post = ''\n-\n- return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post\n+ return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path)\n \n def checksum(self, path, python_interp):\n \"\"\"\n", "issue": "Docker shell module failed in the latest sonic-mgmt\n<!--\r\nIf you are reporting a new issue, make sure that we do not have any duplicates\r\nalready open. You can ensure this by searching the issue list for this\r\nrepository. If there is a duplicate, please close your issue and add a comment\r\nto the existing issue instead.\r\n\r\nIf you suspect your issue is a bug, please edit your issue description to\r\ninclude the BUG REPORT INFORMATION shown below. If you fail to provide this\r\ninformation within 7 days, we cannot debug your issue and will close it. 
We\r\nwill, however, reopen it if you later provide the information.\r\n\r\nFor more information about reporting issues, see\r\nhttps://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n---------------------------------------------------\r\nGENERAL SUPPORT INFORMATION\r\n---------------------------------------------------\r\n\r\nThe GitHub issue tracker is for bug reports and feature requests.\r\nGeneral support can be found at the following locations:\r\n\r\n- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n---------------------------------------------------\r\nBUG REPORT INFORMATION\r\n---------------------------------------------------\r\nUse the commands below to provide key information from your environment:\r\nYou do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n**Description**\r\nThe task with sell type docker always fails on the latest sonic-mgmt, example:\r\n```\r\n- name: Gather information from lldp\r\n lldp:\r\n vars:\r\n ansible_shell_type: docker\r\n ansible_python_interpreter: docker exec -i lldp python\r\n```\r\n\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n**Steps to reproduce the issue:**\r\n1. run dip_sip or lag_2 CT \r\n2.\r\n3.\r\n\r\n**Describe the results you received:**\r\n```\r\nTASK [test : Gathering peer VM information from lldp] **************************\r\ntask path: /var/user/jenkins/bfn-sonic-mgmt/ansible/roles/test/tasks/lag_2.yml:26\r\nThursday 28 November 2019 10:28:47 +0000 (0:00:00.263) 0:00:26.753 ***** \r\nThe full traceback is:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py\", line 145, in run\r\n res = self._execute()\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py\", line 612, in _execute\r\n self._set_connection_options(variables, templar)\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py\", line 1012, in _set_connection_options\r\n self._set_plugin_options('shell', final_vars, templar, task_keys)\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/executor/task_executor.py\", line 978, in _set_plugin_options\r\n plugin.set_options(task_keys=task_keys, var_options=options)\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/plugins/shell/__init__.py\", line 70, in set_options\r\n env = self.get_option('environment')\r\n File \"/usr/local/lib/python2.7/dist-packages/ansible/plugins/__init__.py\", line 60, in get_option\r\n raise KeyError(to_native(e))\r\nKeyError: 'Requested entry (plugin_type: shell plugin: docker setting: environment ) was not defined in configuration.'\r\n\r\nfatal: [cab18-2-dut]: FAILED! 
=> {\r\n \"msg\": \"Unexpected failure during module execution.\", \r\n \"stdout\": \"\"\r\n}\r\n\r\n```\r\n\r\n**Describe the results you expected:**\r\n\r\n\r\n**Additional information you deem important:**\r\n<!--\r\nsoftware/ASIC/Hardware Flatform version and info\r\n-->\r\n **Output of `show version`:**\r\n\r\n ```\r\nSONiC Software Version: SONiC.HEAD.488-dirty-20191127.082217\r\nDistribution: Debian 9.11\r\nKernel: 4.9.0-9-2-amd64\r\nBuild commit: 7622a30d\r\nBuild date: Wed Nov 27 11:15:51 UTC 2019\r\nBuilt by: johnar@jenkins-worker-11\r\n\r\n ```\r\n\r\n **Attach debug file `sudo generate_dump`:**\r\n\r\n ```\r\n (paste your output here)\r\n ```\r\n\n", "before_files": [{"content": "from __future__ import (absolute_import, division)\n__metaclass__ = type\n\nimport os\nimport re\nimport pipes\nimport ansible.constants as C\nimport time\nimport random\nimport shlex\nimport getopt\nfrom ansible.module_utils.six import text_type\nfrom ansible.plugins.shell.sh import ShellModule as sh\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\n\nclass ShellModule(sh):\n\n def __init__(self, *args, **kwargs):\n super(ShellModule, self).__init__(*args, **kwargs)\n self.dtemps = []\n\n def join_path(self, *args):\n ## HACK! HACK! HACK!\n ## We observe the interactions between ShellModule and ActionModule, and\n ## find the temporary directories Ansible created on remote machine. So we\n ## collect them and copied to docker container in build_module_command\n if len(args) >= 2 and (args[0].startswith('/home/') or args[0].startswith('/root/')) and args[1] == '':\n self.dtemps.append(args[0])\n\n return super(ShellModule, self).join_path(*args)\n\n def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):\n # assert(self.container_name)\n argv = shlex.split(shebang.replace(\"#!\", \"\"))\n assert(argv[0] == 'docker')\n assert(argv[1] == 'exec')\n opts, args = getopt.getopt(argv[2:], 'i')\n self.container_name = args[0]\n\n # Inject environment variable before python in the shebang string\n assert(args[1].endswith('python'))\n args[1] = 'env {0} {1}'.format(env_string, args[1])\n argv_env = argv[0:2] + [o for opt in opts for o in opt] + args\n shebang_env = ' '.join(argv_env)\n\n ## Note: Docker cp behavior\n ## DEST_PATH exists and is a directory\n ## SRC_PATH does end with /.\n ## the content of the source directory is copied into this directory\n ## Ref: https://docs.docker.com/engine/reference/commandline/cp/\n pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '\n .format(dtemp, self.container_name) for dtemp in self.dtemps)\n\n if rm_tmp:\n post = ''.join('docker exec {1} rm -rf {0}; '\n .format(dtemp, self.container_name) for dtemp in self.dtemps)\n else:\n post = ''\n\n return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path, rm_tmp) + '; ' + post\n\n def checksum(self, path, python_interp):\n \"\"\"\n Return the command to calculate the checksum for the file in ansible controlled machine\n Arguments:\n path:\n the file path\n python_interp:\n the path for the python interpreter\n Example:\n path:\n /zebra.conf\n python_interp:\n docker exec -i debian python\n cmd:\n rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"/zebra.conf && exit 0; (python -c '...' 
2>/dev/null) || (echo '0 '/zebra.conf)\n returns:\n docker exec -i debian sh -c \"rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\\\"\\$rc\\\" != \\\"xflag\\\" ] && echo \\\"\\${rc} \\\"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)\"\n \"\"\"\n ## Super class implements this function by sh commands and python scripts\n ## If python_interp is modified to 'docker CONTAINER python', it will only influence the python\n ## script part in super class. Instead we should influence both\n simple_interp = 'python'\n assert(python_interp.startswith('docker exec '))\n assert(python_interp.endswith(' ' + simple_interp))\n\n docker_prefix = re.sub(simple_interp, '', python_interp)\n cmd = super(ShellModule, self).checksum(path, simple_interp)\n ## Escape the cmd:\n ## \" --> \\\"\n cmd_escaped = cmd.replace('\"', '\\\\\"')\n ## $ --> \\$\n cmd_escaped = cmd_escaped.replace('$', '\\\\$')\n return '%s sh -c \"%s\"' % (docker_prefix, cmd_escaped)\n", "path": "ansible/shell_plugins/docker.py"}], "after_files": [{"content": "from __future__ import (absolute_import, division)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nname: docker\nplugin_type: shell\nshort_description: \"docker shell plugin\"\nversion_added: historical\ndescription:\n - This module allows you to execute commands directly in docker on the remote host\nextends_documentation_fragment:\n - shell_common\n'''\n\nimport os\nimport re\nimport pipes\nimport ansible.constants as C\nimport time\nimport random\nimport shlex\nimport getopt\nfrom ansible.module_utils.six import text_type\nfrom ansible.plugins.shell.sh import ShellModule as sh\nfrom ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound\n\nclass ShellModule(sh):\n\n def __init__(self, *args, **kwargs):\n super(ShellModule, self).__init__(*args, **kwargs)\n self.dtemps = []\n\n def join_path(self, *args):\n ## HACK! HACK! HACK!\n ## We observe the interactions between ShellModule and ActionModule, and\n ## find the temporary directories Ansible created on remote machine. 
So we\n ## collect them and copied to docker container in build_module_command\n if len(args) >= 2 and (args[0].startswith('/home/') or args[0].startswith('/root/')) and args[1] == '':\n self.dtemps.append(args[0])\n\n return super(ShellModule, self).join_path(*args)\n\n def remove(self, path, recurse=False):\n argv = self.get_option('ansible_python_interpreter').split()\n assert(argv[0] == 'docker')\n assert(argv[1] == 'exec')\n opts, args = getopt.getopt(argv[2:], 'i')\n self.container_name = args[0]\n\n remove_files_on_host_cmd = super(ShellModule, self).remove(path, recurse)\n\n cmd = remove_files_on_host_cmd + \"; docker exec -i \"\n cmd += self.container_name + \" rm -f \"\n if recurse:\n cmd += '-r '\n cmd += \" \".join(self.dtemps)\n\n return cmd\n\n def build_module_command(self, env_string, shebang, cmd, arg_path=None):\n # assert(self.container_name)\n argv = shlex.split(shebang.replace(\"#!\", \"\"))\n assert(argv[0] == 'docker')\n assert(argv[1] == 'exec')\n opts, args = getopt.getopt(argv[2:], 'i')\n self.container_name = args[0]\n\n # Inject environment variable before python in the shebang string\n assert(args[1].endswith('python'))\n args[1] = 'env {0} {1}'.format(env_string, args[1])\n argv_env = argv[0:2] + [o for opt in opts for o in opt] + args\n shebang_env = ' '.join(argv_env)\n\n ## Note: Docker cp behavior\n ## DEST_PATH exists and is a directory\n ## SRC_PATH does end with /.\n ## the content of the source directory is copied into this directory\n ## Ref: https://docs.docker.com/engine/reference/commandline/cp/\n pre = ''.join('docker exec {1} mkdir -p {0}; docker cp {0}/. {1}:{0}; '\n .format(dtemp, self.container_name) for dtemp in self.dtemps)\n\n return pre + super(ShellModule, self).build_module_command('', shebang_env, cmd, arg_path)\n\n def checksum(self, path, python_interp):\n \"\"\"\n Return the command to calculate the checksum for the file in ansible controlled machine\n Arguments:\n path:\n the file path\n python_interp:\n the path for the python interpreter\n Example:\n path:\n /zebra.conf\n python_interp:\n docker exec -i debian python\n cmd:\n rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)\n returns:\n docker exec -i debian sh -c \"rc=flag; [ -r /zebra.conf ] || rc=2; [ -f /zebra.conf ] || rc=1; [ -d /zebra.conf ] && rc=3; python -V 2>/dev/null || rc=4; [ x\\\"\\$rc\\\" != \\\"xflag\\\" ] && echo \\\"\\${rc} \\\"/zebra.conf && exit 0; (python -c '...' 2>/dev/null) || (echo '0 '/zebra.conf)\"\n \"\"\"\n ## Super class implements this function by sh commands and python scripts\n ## If python_interp is modified to 'docker CONTAINER python', it will only influence the python\n ## script part in super class. Instead we should influence both\n simple_interp = 'python'\n assert(python_interp.startswith('docker exec '))\n assert(python_interp.endswith(' ' + simple_interp))\n\n docker_prefix = re.sub(simple_interp, '', python_interp)\n cmd = super(ShellModule, self).checksum(path, simple_interp)\n ## Escape the cmd:\n ## \" --> \\\"\n cmd_escaped = cmd.replace('\"', '\\\\\"')\n ## $ --> \\$\n cmd_escaped = cmd_escaped.replace('$', '\\\\$')\n return '%s sh -c \"%s\"' % (docker_prefix, cmd_escaped)\n", "path": "ansible/shell_plugins/docker.py"}]} | 2,503 | 590 |
gh_patches_debug_22540 | rasdani/github-patches | git_diff | Kinto__kinto-1087 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port Kinto for Pyramid 1.8
Pyramid 1.8 is breaking a number of things:
- BasicAuth authn policy
- pserve http_port config.
Right now it is still an alpha release, but it gets installed from time to time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/__init__.py`
Content:
```
1 """Main entry point
2 """
3 import pkg_resources
4 import tempfile
5
6 from cornice import Service as CorniceService
7 from pyramid.settings import aslist
8
9 from kinto.core import errors
10 from kinto.core import events
11 from kinto.core.initialization import ( # NOQA
12 initialize, install_middlewares,
13 load_default_settings)
14 from kinto.core.utils import (
15 follow_subrequest, current_service, current_resource_name,
16 prefixed_userid, prefixed_principals)
17 from kinto.core.logs import logger
18
19
20 # Module version, as defined in PEP-0396.
21 __version__ = pkg_resources.get_distribution('kinto').version # FIXME?
22
23
24 DEFAULT_SETTINGS = {
25 'backoff': None,
26 'batch_max_requests': 25,
27 'cache_backend': '',
28 'cache_url': '',
29 'cache_pool_size': 25,
30 'cache_prefix': '',
31 'cache_max_size_bytes': 524288,
32 'cors_origins': '*',
33 'cors_max_age_seconds': 3600,
34 'eos': None,
35 'eos_message': None,
36 'eos_url': None,
37 'error_info_link': 'https://github.com/Kinto/kinto/issues/',
38 'http_host': None,
39 'http_scheme': None,
40 'id_generator': 'kinto.core.storage.generators.UUID4',
41 'includes': '',
42 'initialization_sequence': (
43 'kinto.core.initialization.setup_request_bound_data',
44 'kinto.core.initialization.setup_json_serializer',
45 'kinto.core.initialization.setup_logging',
46 'kinto.core.initialization.setup_storage',
47 'kinto.core.initialization.setup_permission',
48 'kinto.core.initialization.setup_cache',
49 'kinto.core.initialization.setup_requests_scheme',
50 'kinto.core.initialization.setup_version_redirection',
51 'kinto.core.initialization.setup_deprecation',
52 'kinto.core.initialization.setup_authentication',
53 'kinto.core.initialization.setup_backoff',
54 'kinto.core.initialization.setup_statsd',
55 'kinto.core.initialization.setup_listeners',
56 'kinto.core.events.setup_transaction_hook',
57 ),
58 'event_listeners': '',
59 'heartbeat_timeout_seconds': 10,
60 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',
61 'newrelic_config': None,
62 'newrelic_env': 'dev',
63 'paginate_by': None,
64 'permission_backend': '',
65 'permission_url': '',
66 'permission_pool_size': 25,
67 'profiler_dir': tempfile.gettempdir(),
68 'profiler_enabled': False,
69 'project_docs': '',
70 'project_name': '',
71 'project_version': '',
72 'readonly': False,
73 'retry_after_seconds': 30,
74 'statsd_backend': 'kinto.core.statsd',
75 'statsd_prefix': 'kinto.core',
76 'statsd_url': None,
77 'storage_backend': '',
78 'storage_url': '',
79 'storage_max_fetch_size': 10000,
80 'storage_pool_size': 25,
81 'tm.annotate_user': False, # Do annotate transactions with the user-id.
82 'transaction_per_request': True,
83 'userid_hmac_secret': '',
84 'version_json_path': 'version.json',
85 'version_prefix_redirect_enabled': True,
86 'trailing_slash_redirect_enabled': True,
87 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',
88 'multiauth.policies': 'basicauth',
89 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'
90 'BasicAuthAuthenticationPolicy'),
91 'multiauth.authorization_policy': ('kinto.core.authorization.'
92 'AuthorizationPolicy'),
93 'swagger_file': 'swagger.yaml',
94 }
95
96
97 class Service(CorniceService):
98 """Subclass of the default cornice service.
99
100 This is useful in order to attach specific behaviours without monkey
101 patching the default cornice service (which would impact other uses of it)
102 """
103 default_cors_headers = ('Backoff', 'Retry-After', 'Alert',
104 'Content-Length')
105
106 def error_handler(self, request):
107 return errors.json_error_handler(request)
108
109 @classmethod
110 def init_from_settings(cls, settings):
111 cls.cors_origins = tuple(aslist(settings['cors_origins']))
112 cors_max_age = settings['cors_max_age_seconds']
113 cls.cors_max_age = int(cors_max_age) if cors_max_age else None
114
115
116 def includeme(config):
117 settings = config.get_settings()
118
119 # Heartbeat registry.
120 config.registry.heartbeats = {}
121
122 # Public settings registry.
123 config.registry.public_settings = {'batch_max_requests', 'readonly'}
124
125 # Directive to declare arbitrary API capabilities.
126 def add_api_capability(config, identifier, description="", url="", **kw):
127 existing = config.registry.api_capabilities.get(identifier)
128 if existing:
129 error_msg = "The '{}' API capability was already registered ({})."
130 raise ValueError(error_msg.format(identifier, existing))
131
132 capability = dict(description=description, url=url, **kw)
133 config.registry.api_capabilities[identifier] = capability
134
135 config.add_directive('add_api_capability', add_api_capability)
136 config.registry.api_capabilities = {}
137
138 # Resource events helpers.
139 config.add_request_method(events.get_resource_events,
140 name='get_resource_events')
141 config.add_request_method(events.notify_resource_event,
142 name='notify_resource_event')
143
144 # Setup cornice.
145 config.include("cornice")
146
147 # Per-request transaction.
148 config.include("pyramid_tm")
149
150 # Add CORS settings to the base kinto.core Service class.
151 Service.init_from_settings(settings)
152
153 # Setup components.
154 for step in aslist(settings['initialization_sequence']):
155 step_func = config.maybe_dotted(step)
156 step_func(config)
157
158 # Custom helpers.
159 config.add_request_method(follow_subrequest)
160 config.add_request_method(prefixed_userid, property=True)
161 config.add_request_method(prefixed_principals, reify=True)
162 config.add_request_method(lambda r: {
163 'id': r.prefixed_userid,
164 'principals': r.prefixed_principals},
165 name='get_user_info')
166 config.add_request_method(current_resource_name, reify=True)
167 config.add_request_method(current_service, reify=True)
168 config.commit()
169
170 # Include plugins after init, unlike pyramid includes.
171 includes = aslist(settings['includes'])
172 for app in includes:
173 config.include(app)
174
175 # # Show settings to output.
176 # for key, value in settings.items():
177 # logger.info('Using {} = {}'.format(key, value))
178
179 # Scan views.
180 config.scan("kinto.core.views")
181
182 # Give sign of life.
183 msg = "Running {project_name} {project_version}."
184 logger.info(msg.format_map(settings))
185
```
Path: `kinto/core/authentication.py`
Content:
```
1 from pyramid import authentication as base_auth
2
3 from kinto.core import utils
4
5
6 class BasicAuthAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):
7 """Basic auth implementation.
8
9 Allow any user with any credentials (e.g. there is no need to create an
10 account).
11
12 """
13 def __init__(self, *args, **kwargs):
14 def noop_check(*a):
15 return []
16 super().__init__(noop_check, *args, **kwargs)
17
18 def effective_principals(self, request):
19 # Bypass default Pyramid construction of principals because
20 # Pyramid multiauth already adds userid, Authenticated and Everyone
21 # principals.
22 return []
23
24 def unauthenticated_userid(self, request):
25 settings = request.registry.settings
26
27 credentials = self._get_credentials(request)
28 if credentials:
29 username, password = credentials
30 if not username:
31 return
32
33 hmac_secret = settings['userid_hmac_secret']
34 credentials = '{}:{}'.format(*credentials)
35 userid = utils.hmac_digest(hmac_secret, credentials)
36 return userid
37
38
39 def includeme(config):
40 config.add_api_capability(
41 "basicauth",
42 description="Very basic authentication sessions. Not for production use.",
43 url="http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html",
44 )
45
```
Path: `setup.py`
Content:
```
1 import codecs
2 import os
3 from setuptools import setup, find_packages
4
5 here = os.path.abspath(os.path.dirname(__file__))
6
7
8 def read_file(filename):
9 """Open a related file and return its content."""
10 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
11 content = f.read()
12 return content
13
14
15 README = read_file('README.rst')
16 CHANGELOG = read_file('CHANGELOG.rst')
17 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
18
19 REQUIREMENTS = [
20 'colander >= 1.3.2',
21 'colorama',
22 'cornice >= 2.4',
23 'jsonschema',
24 'jsonpatch',
25 'python-dateutil',
26 'pyramid >1.7,<1.8',
27 'pyramid_multiauth >= 0.8', # User on policy selected event.
28 'ruamel.yaml',
29 'transaction',
30 'pyramid_tm',
31 'requests',
32 'structlog >= 16.1.0',
33 'enum34',
34 'waitress',
35 'ujson >= 1.35'
36 ]
37
38 POSTGRESQL_REQUIRES = [
39 'SQLAlchemy',
40 'psycopg2 > 2.5',
41 'zope.sqlalchemy',
42 ]
43
44 REDIS_REQUIRES = [
45 'kinto_redis'
46 ]
47
48 SETUP_REQUIRES = [
49 'pytest-runner'
50 ]
51
52 TEST_REQUIREMENTS = [
53 'bravado_core',
54 'pytest',
55 'WebTest'
56 ]
57
58 DEPENDENCY_LINKS = [
59 ]
60
61 MONITORING_REQUIRES = [
62 'raven',
63 'statsd',
64 'newrelic',
65 'werkzeug',
66 ]
67
68 ENTRY_POINTS = {
69 'paste.app_factory': [
70 'main = kinto:main',
71 ],
72 'console_scripts': [
73 'kinto = kinto.__main__:main'
74 ],
75 }
76
77
78 setup(name='kinto',
79 version='6.0.0.dev0',
80 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
81 long_description="{}\n\n{}\n\n{}".format(README, CHANGELOG, CONTRIBUTORS),
82 license='Apache License (2.0)',
83 classifiers=[
84 "Programming Language :: Python",
85 "Programming Language :: Python :: 3",
86 "Programming Language :: Python :: 3.5",
87 "Programming Language :: Python :: 3.6",
88 "Programming Language :: Python :: Implementation :: CPython",
89 "Topic :: Internet :: WWW/HTTP",
90 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
91 "License :: OSI Approved :: Apache Software License"
92 ],
93 keywords="web sync json storage services",
94 author='Mozilla Services',
95 author_email='[email protected]',
96 url='https://github.com/Kinto/kinto',
97 packages=find_packages(),
98 package_data={'': ['*.rst', '*.py', '*.yaml']},
99 include_package_data=True,
100 zip_safe=False,
101 setup_requires=SETUP_REQUIRES,
102 tests_require=TEST_REQUIREMENTS,
103 install_requires=REQUIREMENTS,
104 extras_require={
105 'redis': REDIS_REQUIRES,
106 'postgresql': POSTGRESQL_REQUIRES,
107 'monitoring': MONITORING_REQUIRES,
108 },
109 test_suite="tests",
110 dependency_links=DEPENDENCY_LINKS,
111 entry_points=ENTRY_POINTS)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py
--- a/kinto/core/__init__.py
+++ b/kinto/core/__init__.py
@@ -20,7 +20,6 @@
# Module version, as defined in PEP-0396.
__version__ = pkg_resources.get_distribution('kinto').version # FIXME?
-
DEFAULT_SETTINGS = {
'backoff': None,
'batch_max_requests': 25,
diff --git a/kinto/core/authentication.py b/kinto/core/authentication.py
--- a/kinto/core/authentication.py
+++ b/kinto/core/authentication.py
@@ -24,7 +24,7 @@
def unauthenticated_userid(self, request):
settings = request.registry.settings
- credentials = self._get_credentials(request)
+ credentials = base_auth.extract_http_basic_credentials(request)
if credentials:
username, password = credentials
if not username:
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
'jsonschema',
'jsonpatch',
'python-dateutil',
- 'pyramid >1.7,<1.8',
+ 'pyramid > 1.8',
'pyramid_multiauth >= 0.8', # User on policy selected event.
'ruamel.yaml',
'transaction',
| {"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -20,7 +20,6 @@\n # Module version, as defined in PEP-0396.\n __version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n \n-\n DEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\ndiff --git a/kinto/core/authentication.py b/kinto/core/authentication.py\n--- a/kinto/core/authentication.py\n+++ b/kinto/core/authentication.py\n@@ -24,7 +24,7 @@\n def unauthenticated_userid(self, request):\n settings = request.registry.settings\n \n- credentials = self._get_credentials(request)\n+ credentials = base_auth.extract_http_basic_credentials(request)\n if credentials:\n username, password = credentials\n if not username:\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n- 'pyramid >1.7,<1.8',\n+ 'pyramid > 1.8',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'ruamel.yaml',\n 'transaction',\n", "issue": "Port Kinto for Pyramid 1.8\nPyramid 1.8 is breaking a number of things:\r\n\r\n- BasicAuth authn policy\r\n- pserve http_port config.\r\n\r\nRight now it is still an alpha release but it gets installed on time to time.\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cache_max_size_bytes': 524288,\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': tempfile.gettempdir(),\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 
'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy'),\n 'swagger_file': 'swagger.yaml',\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in 
settings.items():\n # logger.info('Using {} = {}'.format(key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running {project_name} {project_version}.\"\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "from pyramid import authentication as base_auth\n\nfrom kinto.core import utils\n\n\nclass BasicAuthAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):\n \"\"\"Basic auth implementation.\n\n Allow any user with any credentials (e.g. there is no need to create an\n account).\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n def noop_check(*a):\n return []\n super().__init__(noop_check, *args, **kwargs)\n\n def effective_principals(self, request):\n # Bypass default Pyramid construction of principals because\n # Pyramid multiauth already adds userid, Authenticated and Everyone\n # principals.\n return []\n\n def unauthenticated_userid(self, request):\n settings = request.registry.settings\n\n credentials = self._get_credentials(request)\n if credentials:\n username, password = credentials\n if not username:\n return\n\n hmac_secret = settings['userid_hmac_secret']\n credentials = '{}:{}'.format(*credentials)\n userid = utils.hmac_digest(hmac_secret, credentials)\n return userid\n\n\ndef includeme(config):\n config.add_api_capability(\n \"basicauth\",\n description=\"Very basic authentication sessions. Not for production use.\",\n url=\"http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html\",\n )\n", "path": "kinto/core/authentication.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'colander >= 1.3.2',\n 'colorama',\n 'cornice >= 2.4',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n 'pyramid >1.7,<1.8',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'ruamel.yaml',\n 'transaction',\n 'pyramid_tm',\n 'requests',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n 'ujson >= 1.35'\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='6.0.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json 
storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\nimport tempfile\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cache_max_size_bytes': 524288,\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': tempfile.gettempdir(),\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 
'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy'),\n 'swagger_file': 'swagger.yaml',\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '{}' API capability was already registered ({}).\"\n raise ValueError(error_msg.format(identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using {} = {}'.format(key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running {project_name} {project_version}.\"\n logger.info(msg.format_map(settings))\n", "path": "kinto/core/__init__.py"}, {"content": "from pyramid import authentication as base_auth\n\nfrom kinto.core import utils\n\n\nclass BasicAuthAuthenticationPolicy(base_auth.BasicAuthAuthenticationPolicy):\n \"\"\"Basic auth implementation.\n\n Allow any user with any credentials (e.g. 
there is no need to create an\n account).\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n def noop_check(*a):\n return []\n super().__init__(noop_check, *args, **kwargs)\n\n def effective_principals(self, request):\n # Bypass default Pyramid construction of principals because\n # Pyramid multiauth already adds userid, Authenticated and Everyone\n # principals.\n return []\n\n def unauthenticated_userid(self, request):\n settings = request.registry.settings\n\n credentials = base_auth.extract_http_basic_credentials(request)\n if credentials:\n username, password = credentials\n if not username:\n return\n\n hmac_secret = settings['userid_hmac_secret']\n credentials = '{}:{}'.format(*credentials)\n userid = utils.hmac_digest(hmac_secret, credentials)\n return userid\n\n\ndef includeme(config):\n config.add_api_capability(\n \"basicauth\",\n description=\"Very basic authentication sessions. Not for production use.\",\n url=\"http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html\",\n )\n", "path": "kinto/core/authentication.py"}, {"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'colander >= 1.3.2',\n 'colorama',\n 'cornice >= 2.4',\n 'jsonschema',\n 'jsonpatch',\n 'python-dateutil',\n 'pyramid > 1.8',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'ruamel.yaml',\n 'transaction',\n 'pyramid_tm',\n 'requests',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n 'ujson >= 1.35'\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='6.0.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=\"{}\\n\\n{}\\n\\n{}\".format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite=\"tests\",\n 
dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}]} | 3,580 | 309 |
gh_patches_debug_43483 | rasdani/github-patches | git_diff | OpenCTI-Platform__connectors-2071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ESET connector causing runaway Redis memory consumption despite REDIS__TRIMMING=1000000
## Description
When enabling the `external-import/eset` connector with defaults (from `external-import/eset/docker-compose.yml`), Redis rapidly runs away and consumes all memory. I have `REDIS__TRIMMING=1000000` set in my `docker-compose.yml`; it doesn't seem to make a difference for this particular connector, but it does seem to work for everything else.
The `connector-eset` section of my `docker-compose.yml` is lifted almost verbatim from the example provided in the connector's directory:
```yaml
connector-eset:
image: opencti/connector-eset:5.12.32
environment:
- OPENCTI_URL=http://opencti-url
- OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}
- CONNECTOR_ID=${ESET_CONNECTOR_ID}
- "CONNECTOR_NAME=ESET Cyber Threat Intelligence"
- CONNECTOR_SCOPE=eset
- CONNECTOR_CONFIDENCE_LEVEL=30 # From 0 (Unknown) to 100 (Fully trusted)
- CONNECTOR_LOG_LEVEL=error
- ESET_API_URL=eti.eset.com
- ESET_USERNAME=${ESET_CONNECTOR_USER}
- ESET_PASSWORD=${ESET_CONNECTOR_PASSWORD}
- ESET_COLLECTIONS=ei.misp,ei.cc,ei.urls
- ESET_IMPORT_APT_REPORTS=true
- ESET_IMPORT_START_DATE=2022-04-01
- ESET_CREATE_OBSERVABLES=true
- ESET_INTERVAL=60 # Required, in minutes
restart: always
```
## Environment
1. OS (where OpenCTI server runs): Amazon Linux 2023
2. OpenCTI version: 5.12.32
3. OpenCTI client: connector
4. Other environment details: Using Docker CE and `docker-compose`
## Reproducible Steps
Steps to create the smallest reproducible scenario:
1. Add `connector-eset` in my `docker-compose.yml`
2. Update `.env` with proper credentials for access
3. `docker-compose up -d`
4. Wait for a while; eventually Redis grows to consume all RAM (in my case, it got to around 12 GB in usage)
## Expected Output
Redis shouldn't consume all memory and `REDIS__TRIMMING=....` should be able to keep it within a reasonably predictable ceiling. In this particular case, `redis` resident memory size seems to stay under 2GB when trimming is set to `1000000`.
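For context, this is roughly what stream trimming at that setting amounts to; a minimal redis-py sketch of approximate `MAXLEN` trimming (illustrative only — the stream key name `stream.opencti` and the trimming call are assumptions about how the platform applies the setting, not OpenCTI's actual code):
```python
import redis

r = redis.Redis(host="localhost", port=6379)

# Approximate trimming: each append keeps the stream at roughly the newest
# 1,000,000 entries, which is what normally bounds Redis memory.
r.xadd(
    "stream.opencti",  # assumed stream key, illustrative
    {"event": "create", "data": "..."},
    maxlen=1_000_000,
    approximate=True,
)
```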
## Actual Output
`redis` memory consumption grows without restraint until the system runs out of memory and the OOM reaper has to kill something.
## Additional information
Here is a `docker-compose.yml` that seems to be working well for me with `REDIS__TRIMMING=2000000`: https://github.com/ckane/opencti-docker/blob/tf-main/docker-compose.yml
In the deployment I am trying to use it in, I reduced this to `1000000` to see if it would fix the problem, but it doesn't seem to have any impact at all.
In this case, `redis` memory consumption stays under control, but if I add `connector-eset` to it, then when the `eset` connector starts ingesting intel from their feed, `redis` consumption will grow rapidly until it exhausts all RAM in the system.
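A minimal sketch of how the growth can be observed while the connector runs (assumes redis-py is installed and that the platform's event stream key is `stream.opencti`; adjust both for the actual deployment):
```python
import time

import redis

r = redis.Redis(host="localhost", port=6379)

# Poll Redis once a minute: if used memory keeps climbing while the connector
# ingests, the backlog is growing faster than trimming can reclaim it.
for _ in range(10):
    used = r.info("memory")["used_memory_human"]
    backlog = r.xlen("stream.opencti") if r.exists("stream.opencti") else 0
    print(f"used_memory={used} stream_entries={backlog}")
    time.sleep(60)
```
With trimming working as expected, the stream length should plateau near the configured ceiling instead of climbing with every poll.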
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `external-import/eset/src/eset.py`
Content:
```
1 import base64
2 import datetime
3 import json
4 import os
5 import sys
6 import time
7
8 import cabby
9 import eti_api
10 import pytz
11 import stix2
12 import yaml
13 from dateutil.parser import parse
14 from pycti import OpenCTIConnectorHelper, Report, get_config_variable
15
16 TMP_DIR = "TMP"
17
18
19 class Eset:
20 def __init__(self):
21 # Instantiate the connector helper from config
22 config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
23 config = (
24 yaml.load(open(config_file_path), Loader=yaml.FullLoader)
25 if os.path.isfile(config_file_path)
26 else {}
27 )
28 self.helper = OpenCTIConnectorHelper(config)
29 # Extra config
30 self.eset_api_url = get_config_variable(
31 "ESET_API_URL", ["eset", "api_url"], config
32 )
33 self.eset_username = get_config_variable(
34 "ESET_USERNAME", ["eset", "username"], config
35 )
36 self.eset_password = get_config_variable(
37 "ESET_PASSWORD", ["eset", "password"], config
38 )
39 self.eset_collections = get_config_variable(
40 "ESET_COLLECTIONS", ["eset", "collections"], config
41 )
42 self.eset_import_apt_reports = get_config_variable(
43 "ESET_IMPORT_APT_REPORTS",
44 ["eset", "import_apt_reports"],
45 config,
46 False,
47 True,
48 )
49 self.eset_import_start_date = get_config_variable(
50 "ESET_IMPORT_START_DATE",
51 ["eset", "import_start_date"],
52 config,
53 )
54 self.eset_create_observables = get_config_variable(
55 "ESET_CREATE_OBSERVABLES",
56 ["eset", "create_observables"],
57 config,
58 )
59 self.eset_interval = get_config_variable(
60 "ESET_INTERVAL", ["eset", "interval"], config, True
61 )
62 self.update_existing_data = get_config_variable(
63 "CONNECTOR_UPDATE_EXISTING_DATA",
64 ["connector", "update_existing_data"],
65 config,
66 )
67 self.identity = self.helper.api.identity.create(
68 type="Organization",
69 name="ESET",
70 description="ESET, s.r.o., is a software company specializing in cybersecurity.",
71 )
72 self.added_after = int(parse(self.eset_import_start_date).timestamp())
73 # Init variables
74 self.cache = {}
75 if self.eset_collections is not None:
76 self.eset_collections = self.eset_collections.split(",")
77
78 # Create temporary dir and initialize logging.
79 if sys.version_info.major == 2: # Compatibility with Python 2.7.
80 if not os.path.isdir(TMP_DIR):
81 os.makedirs(TMP_DIR)
82 else:
83 os.makedirs(TMP_DIR, exist_ok=True)
84
85 def get_interval(self):
86 return int(self.eset_interval) * 60
87
88 def _download_all_report_stuff(self, connection, report, base_path):
89 """Download xml, pdf and adds (if available) from given *report* into paths starting with *base_path*."""
90 for fmt in ["pdf", "xml", "adds"]:
91 ext = fmt if fmt != "adds" else "zip"
92 connection.get_report(report, fmt, file_path="{}.{}".format(base_path, ext))
93
94 def _import_reports(self, work_id, start_epoch):
95 connection = eti_api.Connection(
96 username=self.eset_username,
97 password=self.eset_password,
98 host="eti.eset.com",
99 )
100 from_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(pytz.utc)
101 i = 0
102 for report in connection.list_reports(
103 type="all", datetimefrom=from_date.isoformat()
104 ):
105 bundle_objects = []
106 if report["status"] != "finished":
107 self.helper.log_info("Finished")
108 continue # Skip not generated reports.
109 i += 1
110 file_path = os.path.join(TMP_DIR, "{}_{:02d}".format("all", i))
111 self._download_all_report_stuff(connection, report, file_path)
112 if os.path.isfile(file_path + ".pdf"):
113 name = report["filename"].replace(".pdf", "")
114 date = parse(report["date"])
115 with open(file_path + ".pdf", "rb") as f:
116 file_data_encoded = base64.b64encode(f.read())
117 file = {
118 "name": report["filename"],
119 "data": file_data_encoded.decode("utf-8"),
120 "mime_type": "application/pdf",
121 "no_trigger_import": True,
122 }
123 stix_report = stix2.Report(
124 id=Report.generate_id(name, date),
125 name=name,
126 report_types=["APT Report"],
127 description=name,
128 published=date,
129 labels=["apt", "eset"],
130 confidence=self.helper.connect_confidence_level,
131 created_by_ref=self.identity["standard_id"],
132 object_refs=[self.identity["standard_id"]],
133 allow_custom=True,
134 x_opencti_files=[file],
135 object_marking_refs=[stix2.TLP_AMBER.get("id")],
136 )
137 bundle_objects.append(stix_report)
138 try:
139 self.helper.log_debug("Objects to be sent " + str(bundle_objects))
140 self.helper.send_stix2_bundle(
141 self.helper.stix2_create_bundle(bundle_objects),
142 update=self.update_existing_data,
143 bypass_split=True,
144 work_id=work_id,
145 )
146 except Exception as e:
147 self.helper.log_info("Failed to process report " + name)
148 self.helper.log_info("ERROR: " + str(e))
149 os.remove(file_path + ".pdf")
150 if os.path.isfile(file_path + ".xml"):
151 os.remove(file_path + ".xml")
152 if os.path.isfile(file_path + ".zip"):
153 os.remove(file_path + ".zip")
154
155 def _import_collection(self, collection, work_id, start_epoch):
156 object_types_with_confidence = [
157 "attack-pattern",
158 "course-of-action",
159 "threat-actor",
160 "intrusion-set",
161 "campaign",
162 "malware",
163 "tool",
164 "vulnerability",
165 "report",
166 "relationship",
167 "indicator",
168 ]
169 client = cabby.create_client(
170 self.eset_api_url, discovery_path="/taxiiservice/discovery", use_https=True
171 )
172 client.set_auth(username=self.eset_username, password=self.eset_password)
173 no_more_result = False
174 end_epoch = start_epoch + 3600
175 while no_more_result is False:
176 self.helper.log_info(
177 "Iterating with collection="
178 + str(collection)
179 + ", start_epoch="
180 + str(start_epoch)
181 + ", end_epoch="
182 + str(end_epoch)
183 )
184 begin_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(
185 pytz.utc
186 )
187 end_date = datetime.datetime.utcfromtimestamp(end_epoch).astimezone(
188 pytz.utc
189 )
190 try:
191 for item in client.poll(
192 collection + " (stix2)", begin_date=begin_date, end_date=end_date
193 ):
194 if not item.content: # Skip empty packages.
195 continue
196 parsed_content = json.loads(item.content)
197 objects = []
198 for object in parsed_content["objects"]:
199 if "confidence" in object_types_with_confidence:
200 if "confidence" not in object:
201 object["confidence"] = int(
202 self.helper.connect_confidence_level
203 )
204 if object["type"] == "indicator":
205 object["name"] = object["pattern"]
206 object["pattern_type"] = "stix"
207 object["pattern"] = (
208 object["pattern"]
209 .replace("SHA1", "'SHA-1'")
210 .replace("SHA256", "'SHA-256'")
211 )
212 if self.eset_create_observables:
213 object["x_opencti_create_observables"] = (
214 self.eset_create_observables
215 )
216 objects.append(object)
217 parsed_content["objects"] = objects
218 self.helper.send_stix2_bundle(
219 json.dumps(parsed_content),
220 update=self.update_existing_data,
221 work_id=work_id,
222 )
223 except Exception as e:
224 self.helper.log_error(str(e))
225 if end_epoch > int(time.time()):
226 no_more_result = True
227 else:
228 start_epoch = end_epoch
229 end_epoch = start_epoch + 3600
230
231 def run(self):
232 while True:
233 try:
234 self.helper.log_info("Synchronizing with ESET API...")
235 timestamp = int(time.time())
236 now = datetime.datetime.utcfromtimestamp(timestamp)
237 friendly_name = "ESET run @ " + now.strftime("%Y-%m-%d %H:%M:%S")
238 work_id = self.helper.api.work.initiate_work(
239 self.helper.connect_id, friendly_name
240 )
241 current_state = self.helper.get_state()
242 if current_state is None:
243 self.helper.set_state({"last_run": self.added_after})
244 # Get collections
245 current_state = self.helper.get_state()
246
247 if self.eset_collections is not None:
248 for collection in self.eset_collections:
249 self._import_collection(
250 collection, work_id, current_state["last_run"]
251 )
252 if self.eset_import_apt_reports:
253 self._import_reports(work_id, current_state["last_run"])
254 self.helper.set_state({"last_run": timestamp})
255 message = "End of synchronization"
256 self.helper.api.work.to_processed(work_id, message)
257 self.helper.log_info(message)
258
259 if self.helper.connect_run_and_terminate:
260 self.helper.log_info("Connector stop")
261 self.helper.force_ping()
262 sys.exit(0)
263
264 time.sleep(self.get_interval())
265 except (KeyboardInterrupt, SystemExit):
266 self.helper.log_info("Connector stop")
267 sys.exit(0)
268
269 except Exception as e:
270 self.helper.log_error(str(e))
271
272 if self.helper.connect_run_and_terminate:
273 self.helper.log_info("Connector stop")
274 sys.exit(0)
275
276 time.sleep(60)
277
278
279 if __name__ == "__main__":
280 try:
281 esetConnector = Eset()
282 esetConnector.run()
283 except Exception as e:
284 print(e)
285 time.sleep(10)
286 sys.exit(0)
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/external-import/eset/src/eset.py b/external-import/eset/src/eset.py
--- a/external-import/eset/src/eset.py
+++ b/external-import/eset/src/eset.py
@@ -11,7 +11,13 @@
import stix2
import yaml
from dateutil.parser import parse
-from pycti import OpenCTIConnectorHelper, Report, get_config_variable
+from pycti import (
+ Indicator,
+ Malware,
+ OpenCTIConnectorHelper,
+ Report,
+ get_config_variable,
+)
TMP_DIR = "TMP"
@@ -195,12 +201,55 @@
continue
parsed_content = json.loads(item.content)
objects = []
+ id_remaps = {}
+ removed_ids = set()
for object in parsed_content["objects"]:
if "confidence" in object_types_with_confidence:
if "confidence" not in object:
object["confidence"] = int(
self.helper.connect_confidence_level
)
+ # Don't consume identity entities w/ "customer" as the name.
+ # ESET uses this to indicate country targeting, and consuming
+ # these causes problems due to dedupe.
+ # TODO: Convert these & relevant relationship refs to country
+ # locations.
+ if (
+ object["type"] == "identity"
+ and "name" in object
+ and object["name"] == "customer"
+ ) or object["type"] == "observed-data":
+ removed_ids.add(object["id"])
+ continue
+
+ # Malware STIX IDs need to be manually recomputed so they're
+ # deterministic by malware name
+ if object["type"] == "malware" and "name" in object:
+ new_id = Malware.generate_id(object["name"])
+ if object["id"] in id_remaps:
+ new_id = id_remaps[object["id"]]
+ else:
+ id_remaps[object["id"]] = new_id
+ object["id"] = new_id
+
+ # If we remapped a STIX id earlier to a pycti one, we need to
+ # reflect that properly in any relevant relationship too
+ if object["type"] == "relationship":
+ if "source_ref" in object:
+ if object["source_ref"] in removed_ids:
+ continue # skip relationship if either ref is in removed_ids
+ if object["source_ref"] in id_remaps:
+ object["source_ref"] = id_remaps[
+ object["source_ref"]
+ ]
+ if "target_ref" in object:
+ if object["target_ref"] in removed_ids:
+ continue # skip relationship if either ref is in removed_ids
+ if object["target_ref"] in id_remaps:
+ object["target_ref"] = id_remaps[
+ object["target_ref"]
+ ]
+
if object["type"] == "indicator":
object["name"] = object["pattern"]
object["pattern_type"] = "stix"
@@ -209,6 +258,12 @@
.replace("SHA1", "'SHA-1'")
.replace("SHA256", "'SHA-256'")
)
+ new_id = Indicator.generate_id(object["pattern"])
+ if object["id"] in id_remaps:
+ new_id = id_remaps[object["id"]]
+ else:
+ id_remaps[object["id"]] = new_id
+ object["id"] = new_id
if self.eset_create_observables:
object["x_opencti_create_observables"] = (
self.eset_create_observables
| {"golden_diff": "diff --git a/external-import/eset/src/eset.py b/external-import/eset/src/eset.py\n--- a/external-import/eset/src/eset.py\n+++ b/external-import/eset/src/eset.py\n@@ -11,7 +11,13 @@\n import stix2\n import yaml\n from dateutil.parser import parse\n-from pycti import OpenCTIConnectorHelper, Report, get_config_variable\n+from pycti import (\n+ Indicator,\n+ Malware,\n+ OpenCTIConnectorHelper,\n+ Report,\n+ get_config_variable,\n+)\n \n TMP_DIR = \"TMP\"\n \n@@ -195,12 +201,55 @@\n continue\n parsed_content = json.loads(item.content)\n objects = []\n+ id_remaps = {}\n+ removed_ids = set()\n for object in parsed_content[\"objects\"]:\n if \"confidence\" in object_types_with_confidence:\n if \"confidence\" not in object:\n object[\"confidence\"] = int(\n self.helper.connect_confidence_level\n )\n+ # Don't consume identity entities w/ \"customer\" as the name.\n+ # ESET uses this to indicate country targeting, and consuming\n+ # these causes problems due to dedupe.\n+ # TODO: Convert these & relevant relationship refs to country\n+ # locations.\n+ if (\n+ object[\"type\"] == \"identity\"\n+ and \"name\" in object\n+ and object[\"name\"] == \"customer\"\n+ ) or object[\"type\"] == \"observed-data\":\n+ removed_ids.add(object[\"id\"])\n+ continue\n+\n+ # Malware STIX IDs need to be manually recomputed so they're\n+ # deterministic by malware name\n+ if object[\"type\"] == \"malware\" and \"name\" in object:\n+ new_id = Malware.generate_id(object[\"name\"])\n+ if object[\"id\"] in id_remaps:\n+ new_id = id_remaps[object[\"id\"]]\n+ else:\n+ id_remaps[object[\"id\"]] = new_id\n+ object[\"id\"] = new_id\n+\n+ # If we remapped a STIX id earlier to a pycti one, we need to\n+ # reflect that properly in any relevant relationship too\n+ if object[\"type\"] == \"relationship\":\n+ if \"source_ref\" in object:\n+ if object[\"source_ref\"] in removed_ids:\n+ continue # skip relationship if either ref is in removed_ids\n+ if object[\"source_ref\"] in id_remaps:\n+ object[\"source_ref\"] = id_remaps[\n+ object[\"source_ref\"]\n+ ]\n+ if \"target_ref\" in object:\n+ if object[\"target_ref\"] in removed_ids:\n+ continue # skip relationship if either ref is in removed_ids\n+ if object[\"target_ref\"] in id_remaps:\n+ object[\"target_ref\"] = id_remaps[\n+ object[\"target_ref\"]\n+ ]\n+\n if object[\"type\"] == \"indicator\":\n object[\"name\"] = object[\"pattern\"]\n object[\"pattern_type\"] = \"stix\"\n@@ -209,6 +258,12 @@\n .replace(\"SHA1\", \"'SHA-1'\")\n .replace(\"SHA256\", \"'SHA-256'\")\n )\n+ new_id = Indicator.generate_id(object[\"pattern\"])\n+ if object[\"id\"] in id_remaps:\n+ new_id = id_remaps[object[\"id\"]]\n+ else:\n+ id_remaps[object[\"id\"]] = new_id\n+ object[\"id\"] = new_id\n if self.eset_create_observables:\n object[\"x_opencti_create_observables\"] = (\n self.eset_create_observables\n", "issue": "ESET connector causing runaway Redis memory consumption despite REDIS__TRIMMING=1000000\n## Description\r\n\r\nWhen enabling the `external-import/eset` connector with defaults (from `external-import/eset/docker-compose.yml`), it causes redis to rapidly run away with consuming all memory. 
I have `REDIS__TRIMMING=1000000` set in my `docker-compose.yml` and it doesn't seem to make a difference for this particular connector, but seems to be working for everything else.\r\n\r\nThe `connector-eset` section of my `docker-compose.yml` is lifted almost verbatim from the example provided in the connector's directory:\r\n```yaml\r\n connector-eset:\r\n image: opencti/connector-eset:5.12.32\r\n environment:\r\n - OPENCTI_URL=http://opencti-url\r\n - OPENCTI_TOKEN=${OPENCTI_ADMIN_TOKEN}\r\n - CONNECTOR_ID=${ESET_CONNECTOR_ID}\r\n - \"CONNECTOR_NAME=ESET Cyber Threat Intelligence\"\r\n - CONNECTOR_SCOPE=eset\r\n - CONNECTOR_CONFIDENCE_LEVEL=30 # From 0 (Unknown) to 100 (Fully trusted)\r\n - CONNECTOR_LOG_LEVEL=error\r\n - ESET_API_URL=eti.eset.com\r\n - ESET_USERNAME=${ESET_CONNECTOR_USER}\r\n - ESET_PASSWORD=${ESET_CONNECTOR_PASSWORD}\r\n - ESET_COLLECTIONS=ei.misp,ei.cc,ei.urls\r\n - ESET_IMPORT_APT_REPORTS=true\r\n - ESET_IMPORT_START_DATE=2022-04-01\r\n - ESET_CREATE_OBSERVABLES=true\r\n - ESET_INTERVAL=60 # Required, in minutes\r\n restart: always\r\n```\r\n\r\n## Environment\r\n\r\n1. OS (where OpenCTI server runs): Amazon Linux 2023\r\n2. OpenCTI version: 5.12.32\r\n3. OpenCTI client: connector\r\n4. Other environment details: Using Docker CE and `docker-compose`\r\n\r\n## Reproducible Steps\r\n\r\nSteps to create the smallest reproducible scenario:\r\n1. Add `connector-eset` in my `docker-compose.yml`\r\n2. Update `.env` with proper credentials for access\r\n3. `docker-compose up -d`\r\n4. Wait for awhile and eventually redis grows to consume all RAM (in my case, it got to around 12GB in usage)\r\n\r\n## Expected Output\r\n\r\nRedis shouldn't consume all memory and `REDIS__TRIMMING=....` should be able to keep it within a reasonably predictable ceiling. 
In this particular case, `redis` resident memory size seems to stay under 2GB when trimming is set to `1000000`.\r\n\r\n## Actual Output\r\n\r\n`redis` memory consumption grows without restraint until the system runs out of memory and the OOM reaper has to kill something.\r\n\r\n## Additional information\r\n\r\nHere is a `docker-compose.yml` that seems to be working well for me with `REDIS__TRIMMING=2000000`: https://github.com/ckane/opencti-docker/blob/tf-main/docker-compose.yml\r\n\r\nIn the deployment I am trying to use it in, I reduced this to `1000000` to see if it would fix the problem, but it doesn't seem to have any impact at all.\r\n\r\nIn this case, `redis` memory consumption stays under control, but if I add `connector-eset` to it, then when the `eset` connector starts ingesting intel from their feed, `redis` consumption will grow rapidly until it exhausts all RAM in the system.\n", "before_files": [{"content": "import base64\nimport datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport cabby\nimport eti_api\nimport pytz\nimport stix2\nimport yaml\nfrom dateutil.parser import parse\nfrom pycti import OpenCTIConnectorHelper, Report, get_config_variable\n\nTMP_DIR = \"TMP\"\n\n\nclass Eset:\n def __init__(self):\n # Instantiate the connector helper from config\n config_file_path = os.path.dirname(os.path.abspath(__file__)) + \"/config.yml\"\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else {}\n )\n self.helper = OpenCTIConnectorHelper(config)\n # Extra config\n self.eset_api_url = get_config_variable(\n \"ESET_API_URL\", [\"eset\", \"api_url\"], config\n )\n self.eset_username = get_config_variable(\n \"ESET_USERNAME\", [\"eset\", \"username\"], config\n )\n self.eset_password = get_config_variable(\n \"ESET_PASSWORD\", [\"eset\", \"password\"], config\n )\n self.eset_collections = get_config_variable(\n \"ESET_COLLECTIONS\", [\"eset\", \"collections\"], config\n )\n self.eset_import_apt_reports = get_config_variable(\n \"ESET_IMPORT_APT_REPORTS\",\n [\"eset\", \"import_apt_reports\"],\n config,\n False,\n True,\n )\n self.eset_import_start_date = get_config_variable(\n \"ESET_IMPORT_START_DATE\",\n [\"eset\", \"import_start_date\"],\n config,\n )\n self.eset_create_observables = get_config_variable(\n \"ESET_CREATE_OBSERVABLES\",\n [\"eset\", \"create_observables\"],\n config,\n )\n self.eset_interval = get_config_variable(\n \"ESET_INTERVAL\", [\"eset\", \"interval\"], config, True\n )\n self.update_existing_data = get_config_variable(\n \"CONNECTOR_UPDATE_EXISTING_DATA\",\n [\"connector\", \"update_existing_data\"],\n config,\n )\n self.identity = self.helper.api.identity.create(\n type=\"Organization\",\n name=\"ESET\",\n description=\"ESET, s.r.o., is a software company specializing in cybersecurity.\",\n )\n self.added_after = int(parse(self.eset_import_start_date).timestamp())\n # Init variables\n self.cache = {}\n if self.eset_collections is not None:\n self.eset_collections = self.eset_collections.split(\",\")\n\n # Create temporary dir and initialize logging.\n if sys.version_info.major == 2: # Compatibility with Python 2.7.\n if not os.path.isdir(TMP_DIR):\n os.makedirs(TMP_DIR)\n else:\n os.makedirs(TMP_DIR, exist_ok=True)\n\n def get_interval(self):\n return int(self.eset_interval) * 60\n\n def _download_all_report_stuff(self, connection, report, base_path):\n \"\"\"Download xml, pdf and adds (if available) from given *report* into paths starting with *base_path*.\"\"\"\n for fmt in 
[\"pdf\", \"xml\", \"adds\"]:\n ext = fmt if fmt != \"adds\" else \"zip\"\n connection.get_report(report, fmt, file_path=\"{}.{}\".format(base_path, ext))\n\n def _import_reports(self, work_id, start_epoch):\n connection = eti_api.Connection(\n username=self.eset_username,\n password=self.eset_password,\n host=\"eti.eset.com\",\n )\n from_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(pytz.utc)\n i = 0\n for report in connection.list_reports(\n type=\"all\", datetimefrom=from_date.isoformat()\n ):\n bundle_objects = []\n if report[\"status\"] != \"finished\":\n self.helper.log_info(\"Finished\")\n continue # Skip not generated reports.\n i += 1\n file_path = os.path.join(TMP_DIR, \"{}_{:02d}\".format(\"all\", i))\n self._download_all_report_stuff(connection, report, file_path)\n if os.path.isfile(file_path + \".pdf\"):\n name = report[\"filename\"].replace(\".pdf\", \"\")\n date = parse(report[\"date\"])\n with open(file_path + \".pdf\", \"rb\") as f:\n file_data_encoded = base64.b64encode(f.read())\n file = {\n \"name\": report[\"filename\"],\n \"data\": file_data_encoded.decode(\"utf-8\"),\n \"mime_type\": \"application/pdf\",\n \"no_trigger_import\": True,\n }\n stix_report = stix2.Report(\n id=Report.generate_id(name, date),\n name=name,\n report_types=[\"APT Report\"],\n description=name,\n published=date,\n labels=[\"apt\", \"eset\"],\n confidence=self.helper.connect_confidence_level,\n created_by_ref=self.identity[\"standard_id\"],\n object_refs=[self.identity[\"standard_id\"]],\n allow_custom=True,\n x_opencti_files=[file],\n object_marking_refs=[stix2.TLP_AMBER.get(\"id\")],\n )\n bundle_objects.append(stix_report)\n try:\n self.helper.log_debug(\"Objects to be sent \" + str(bundle_objects))\n self.helper.send_stix2_bundle(\n self.helper.stix2_create_bundle(bundle_objects),\n update=self.update_existing_data,\n bypass_split=True,\n work_id=work_id,\n )\n except Exception as e:\n self.helper.log_info(\"Failed to process report \" + name)\n self.helper.log_info(\"ERROR: \" + str(e))\n os.remove(file_path + \".pdf\")\n if os.path.isfile(file_path + \".xml\"):\n os.remove(file_path + \".xml\")\n if os.path.isfile(file_path + \".zip\"):\n os.remove(file_path + \".zip\")\n\n def _import_collection(self, collection, work_id, start_epoch):\n object_types_with_confidence = [\n \"attack-pattern\",\n \"course-of-action\",\n \"threat-actor\",\n \"intrusion-set\",\n \"campaign\",\n \"malware\",\n \"tool\",\n \"vulnerability\",\n \"report\",\n \"relationship\",\n \"indicator\",\n ]\n client = cabby.create_client(\n self.eset_api_url, discovery_path=\"/taxiiservice/discovery\", use_https=True\n )\n client.set_auth(username=self.eset_username, password=self.eset_password)\n no_more_result = False\n end_epoch = start_epoch + 3600\n while no_more_result is False:\n self.helper.log_info(\n \"Iterating with collection=\"\n + str(collection)\n + \", start_epoch=\"\n + str(start_epoch)\n + \", end_epoch=\"\n + str(end_epoch)\n )\n begin_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(\n pytz.utc\n )\n end_date = datetime.datetime.utcfromtimestamp(end_epoch).astimezone(\n pytz.utc\n )\n try:\n for item in client.poll(\n collection + \" (stix2)\", begin_date=begin_date, end_date=end_date\n ):\n if not item.content: # Skip empty packages.\n continue\n parsed_content = json.loads(item.content)\n objects = []\n for object in parsed_content[\"objects\"]:\n if \"confidence\" in object_types_with_confidence:\n if \"confidence\" not in object:\n object[\"confidence\"] = 
int(\n self.helper.connect_confidence_level\n )\n if object[\"type\"] == \"indicator\":\n object[\"name\"] = object[\"pattern\"]\n object[\"pattern_type\"] = \"stix\"\n object[\"pattern\"] = (\n object[\"pattern\"]\n .replace(\"SHA1\", \"'SHA-1'\")\n .replace(\"SHA256\", \"'SHA-256'\")\n )\n if self.eset_create_observables:\n object[\"x_opencti_create_observables\"] = (\n self.eset_create_observables\n )\n objects.append(object)\n parsed_content[\"objects\"] = objects\n self.helper.send_stix2_bundle(\n json.dumps(parsed_content),\n update=self.update_existing_data,\n work_id=work_id,\n )\n except Exception as e:\n self.helper.log_error(str(e))\n if end_epoch > int(time.time()):\n no_more_result = True\n else:\n start_epoch = end_epoch\n end_epoch = start_epoch + 3600\n\n def run(self):\n while True:\n try:\n self.helper.log_info(\"Synchronizing with ESET API...\")\n timestamp = int(time.time())\n now = datetime.datetime.utcfromtimestamp(timestamp)\n friendly_name = \"ESET run @ \" + now.strftime(\"%Y-%m-%d %H:%M:%S\")\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n current_state = self.helper.get_state()\n if current_state is None:\n self.helper.set_state({\"last_run\": self.added_after})\n # Get collections\n current_state = self.helper.get_state()\n\n if self.eset_collections is not None:\n for collection in self.eset_collections:\n self._import_collection(\n collection, work_id, current_state[\"last_run\"]\n )\n if self.eset_import_apt_reports:\n self._import_reports(work_id, current_state[\"last_run\"])\n self.helper.set_state({\"last_run\": timestamp})\n message = \"End of synchronization\"\n self.helper.api.work.to_processed(work_id, message)\n self.helper.log_info(message)\n\n if self.helper.connect_run_and_terminate:\n self.helper.log_info(\"Connector stop\")\n self.helper.force_ping()\n sys.exit(0)\n\n time.sleep(self.get_interval())\n except (KeyboardInterrupt, SystemExit):\n self.helper.log_info(\"Connector stop\")\n sys.exit(0)\n\n except Exception as e:\n self.helper.log_error(str(e))\n\n if self.helper.connect_run_and_terminate:\n self.helper.log_info(\"Connector stop\")\n sys.exit(0)\n\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n try:\n esetConnector = Eset()\n esetConnector.run()\n except Exception as e:\n print(e)\n time.sleep(10)\n sys.exit(0)\n", "path": "external-import/eset/src/eset.py"}], "after_files": [{"content": "import base64\nimport datetime\nimport json\nimport os\nimport sys\nimport time\n\nimport cabby\nimport eti_api\nimport pytz\nimport stix2\nimport yaml\nfrom dateutil.parser import parse\nfrom pycti import (\n Indicator,\n Malware,\n OpenCTIConnectorHelper,\n Report,\n get_config_variable,\n)\n\nTMP_DIR = \"TMP\"\n\n\nclass Eset:\n def __init__(self):\n # Instantiate the connector helper from config\n config_file_path = os.path.dirname(os.path.abspath(__file__)) + \"/config.yml\"\n config = (\n yaml.load(open(config_file_path), Loader=yaml.FullLoader)\n if os.path.isfile(config_file_path)\n else {}\n )\n self.helper = OpenCTIConnectorHelper(config)\n # Extra config\n self.eset_api_url = get_config_variable(\n \"ESET_API_URL\", [\"eset\", \"api_url\"], config\n )\n self.eset_username = get_config_variable(\n \"ESET_USERNAME\", [\"eset\", \"username\"], config\n )\n self.eset_password = get_config_variable(\n \"ESET_PASSWORD\", [\"eset\", \"password\"], config\n )\n self.eset_collections = get_config_variable(\n \"ESET_COLLECTIONS\", [\"eset\", \"collections\"], config\n )\n self.eset_import_apt_reports 
= get_config_variable(\n \"ESET_IMPORT_APT_REPORTS\",\n [\"eset\", \"import_apt_reports\"],\n config,\n False,\n True,\n )\n self.eset_import_start_date = get_config_variable(\n \"ESET_IMPORT_START_DATE\",\n [\"eset\", \"import_start_date\"],\n config,\n )\n self.eset_create_observables = get_config_variable(\n \"ESET_CREATE_OBSERVABLES\",\n [\"eset\", \"create_observables\"],\n config,\n )\n self.eset_interval = get_config_variable(\n \"ESET_INTERVAL\", [\"eset\", \"interval\"], config, True\n )\n self.update_existing_data = get_config_variable(\n \"CONNECTOR_UPDATE_EXISTING_DATA\",\n [\"connector\", \"update_existing_data\"],\n config,\n )\n self.identity = self.helper.api.identity.create(\n type=\"Organization\",\n name=\"ESET\",\n description=\"ESET, s.r.o., is a software company specializing in cybersecurity.\",\n )\n self.added_after = int(parse(self.eset_import_start_date).timestamp())\n # Init variables\n self.cache = {}\n if self.eset_collections is not None:\n self.eset_collections = self.eset_collections.split(\",\")\n\n # Create temporary dir and initialize logging.\n if sys.version_info.major == 2: # Compatibility with Python 2.7.\n if not os.path.isdir(TMP_DIR):\n os.makedirs(TMP_DIR)\n else:\n os.makedirs(TMP_DIR, exist_ok=True)\n\n def get_interval(self):\n return int(self.eset_interval) * 60\n\n def _download_all_report_stuff(self, connection, report, base_path):\n \"\"\"Download xml, pdf and adds (if available) from given *report* into paths starting with *base_path*.\"\"\"\n for fmt in [\"pdf\", \"xml\", \"adds\"]:\n ext = fmt if fmt != \"adds\" else \"zip\"\n connection.get_report(report, fmt, file_path=\"{}.{}\".format(base_path, ext))\n\n def _import_reports(self, work_id, start_epoch):\n connection = eti_api.Connection(\n username=self.eset_username,\n password=self.eset_password,\n host=\"eti.eset.com\",\n )\n from_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(pytz.utc)\n i = 0\n for report in connection.list_reports(\n type=\"all\", datetimefrom=from_date.isoformat()\n ):\n bundle_objects = []\n if report[\"status\"] != \"finished\":\n self.helper.log_info(\"Finished\")\n continue # Skip not generated reports.\n i += 1\n file_path = os.path.join(TMP_DIR, \"{}_{:02d}\".format(\"all\", i))\n self._download_all_report_stuff(connection, report, file_path)\n if os.path.isfile(file_path + \".pdf\"):\n name = report[\"filename\"].replace(\".pdf\", \"\")\n date = parse(report[\"date\"])\n with open(file_path + \".pdf\", \"rb\") as f:\n file_data_encoded = base64.b64encode(f.read())\n file = {\n \"name\": report[\"filename\"],\n \"data\": file_data_encoded.decode(\"utf-8\"),\n \"mime_type\": \"application/pdf\",\n \"no_trigger_import\": True,\n }\n stix_report = stix2.Report(\n id=Report.generate_id(name, date),\n name=name,\n report_types=[\"APT Report\"],\n description=name,\n published=date,\n labels=[\"apt\", \"eset\"],\n confidence=self.helper.connect_confidence_level,\n created_by_ref=self.identity[\"standard_id\"],\n object_refs=[self.identity[\"standard_id\"]],\n allow_custom=True,\n x_opencti_files=[file],\n object_marking_refs=[stix2.TLP_AMBER.get(\"id\")],\n )\n bundle_objects.append(stix_report)\n try:\n self.helper.log_debug(\"Objects to be sent \" + str(bundle_objects))\n self.helper.send_stix2_bundle(\n self.helper.stix2_create_bundle(bundle_objects),\n update=self.update_existing_data,\n bypass_split=True,\n work_id=work_id,\n )\n except Exception as e:\n self.helper.log_info(\"Failed to process report \" + name)\n 
self.helper.log_info(\"ERROR: \" + str(e))\n os.remove(file_path + \".pdf\")\n if os.path.isfile(file_path + \".xml\"):\n os.remove(file_path + \".xml\")\n if os.path.isfile(file_path + \".zip\"):\n os.remove(file_path + \".zip\")\n\n def _import_collection(self, collection, work_id, start_epoch):\n object_types_with_confidence = [\n \"attack-pattern\",\n \"course-of-action\",\n \"threat-actor\",\n \"intrusion-set\",\n \"campaign\",\n \"malware\",\n \"tool\",\n \"vulnerability\",\n \"report\",\n \"relationship\",\n \"indicator\",\n ]\n client = cabby.create_client(\n self.eset_api_url, discovery_path=\"/taxiiservice/discovery\", use_https=True\n )\n client.set_auth(username=self.eset_username, password=self.eset_password)\n no_more_result = False\n end_epoch = start_epoch + 3600\n while no_more_result is False:\n self.helper.log_info(\n \"Iterating with collection=\"\n + str(collection)\n + \", start_epoch=\"\n + str(start_epoch)\n + \", end_epoch=\"\n + str(end_epoch)\n )\n begin_date = datetime.datetime.utcfromtimestamp(start_epoch).astimezone(\n pytz.utc\n )\n end_date = datetime.datetime.utcfromtimestamp(end_epoch).astimezone(\n pytz.utc\n )\n try:\n for item in client.poll(\n collection + \" (stix2)\", begin_date=begin_date, end_date=end_date\n ):\n if not item.content: # Skip empty packages.\n continue\n parsed_content = json.loads(item.content)\n objects = []\n id_remaps = {}\n removed_ids = set()\n for object in parsed_content[\"objects\"]:\n if \"confidence\" in object_types_with_confidence:\n if \"confidence\" not in object:\n object[\"confidence\"] = int(\n self.helper.connect_confidence_level\n )\n # Don't consume identity entities w/ \"customer\" as the name.\n # ESET uses this to indicate country targeting, and consuming\n # these causes problems due to dedupe.\n # TODO: Convert these & relevant relationship refs to country\n # locations.\n if (\n object[\"type\"] == \"identity\"\n and \"name\" in object\n and object[\"name\"] == \"customer\"\n ) or object[\"type\"] == \"observed-data\":\n removed_ids.add(object[\"id\"])\n continue\n\n # Malware STIX IDs need to be manually recomputed so they're\n # deterministic by malware name\n if object[\"type\"] == \"malware\" and \"name\" in object:\n new_id = Malware.generate_id(object[\"name\"])\n if object[\"id\"] in id_remaps:\n new_id = id_remaps[object[\"id\"]]\n else:\n id_remaps[object[\"id\"]] = new_id\n object[\"id\"] = new_id\n\n # If we remapped a STIX id earlier to a pycti one, we need to\n # reflect that properly in any relevant relationship too\n if object[\"type\"] == \"relationship\":\n if \"source_ref\" in object:\n if object[\"source_ref\"] in removed_ids:\n continue # skip relationship if either ref is in removed_ids\n if object[\"source_ref\"] in id_remaps:\n object[\"source_ref\"] = id_remaps[\n object[\"source_ref\"]\n ]\n if \"target_ref\" in object:\n if object[\"target_ref\"] in removed_ids:\n continue # skip relationship if either ref is in removed_ids\n if object[\"target_ref\"] in id_remaps:\n object[\"target_ref\"] = id_remaps[\n object[\"target_ref\"]\n ]\n\n if object[\"type\"] == \"indicator\":\n object[\"name\"] = object[\"pattern\"]\n object[\"pattern_type\"] = \"stix\"\n object[\"pattern\"] = (\n object[\"pattern\"]\n .replace(\"SHA1\", \"'SHA-1'\")\n .replace(\"SHA256\", \"'SHA-256'\")\n )\n new_id = Indicator.generate_id(object[\"pattern\"])\n if object[\"id\"] in id_remaps:\n new_id = id_remaps[object[\"id\"]]\n else:\n id_remaps[object[\"id\"]] = new_id\n object[\"id\"] = new_id\n if 
self.eset_create_observables:\n object[\"x_opencti_create_observables\"] = (\n self.eset_create_observables\n )\n objects.append(object)\n parsed_content[\"objects\"] = objects\n self.helper.send_stix2_bundle(\n json.dumps(parsed_content),\n update=self.update_existing_data,\n work_id=work_id,\n )\n except Exception as e:\n self.helper.log_error(str(e))\n if end_epoch > int(time.time()):\n no_more_result = True\n else:\n start_epoch = end_epoch\n end_epoch = start_epoch + 3600\n\n def run(self):\n while True:\n try:\n self.helper.log_info(\"Synchronizing with ESET API...\")\n timestamp = int(time.time())\n now = datetime.datetime.utcfromtimestamp(timestamp)\n friendly_name = \"ESET run @ \" + now.strftime(\"%Y-%m-%d %H:%M:%S\")\n work_id = self.helper.api.work.initiate_work(\n self.helper.connect_id, friendly_name\n )\n current_state = self.helper.get_state()\n if current_state is None:\n self.helper.set_state({\"last_run\": self.added_after})\n # Get collections\n current_state = self.helper.get_state()\n\n if self.eset_collections is not None:\n for collection in self.eset_collections:\n self._import_collection(\n collection, work_id, current_state[\"last_run\"]\n )\n if self.eset_import_apt_reports:\n self._import_reports(work_id, current_state[\"last_run\"])\n self.helper.set_state({\"last_run\": timestamp})\n message = \"End of synchronization\"\n self.helper.api.work.to_processed(work_id, message)\n self.helper.log_info(message)\n\n if self.helper.connect_run_and_terminate:\n self.helper.log_info(\"Connector stop\")\n self.helper.force_ping()\n sys.exit(0)\n\n time.sleep(self.get_interval())\n except (KeyboardInterrupt, SystemExit):\n self.helper.log_info(\"Connector stop\")\n sys.exit(0)\n\n except Exception as e:\n self.helper.log_error(str(e))\n\n if self.helper.connect_run_and_terminate:\n self.helper.log_info(\"Connector stop\")\n sys.exit(0)\n\n time.sleep(60)\n\n\nif __name__ == \"__main__\":\n try:\n esetConnector = Eset()\n esetConnector.run()\n except Exception as e:\n print(e)\n time.sleep(10)\n sys.exit(0)\n", "path": "external-import/eset/src/eset.py"}]} | 4,005 | 833 |
gh_patches_debug_40687 | rasdani/github-patches | git_diff | facebookresearch__hydra-1545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support multirun partial failure in Nevergrad sweeper
Context here: https://github.com/facebookresearch/hydra/issues/1377
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass, field
3 from typing import Any, Dict, Optional
4
5 from hydra.core.config_store import ConfigStore
6
7
8 @dataclass
9 class ScalarConfigSpec:
10 """Representation of all the options to define
11 a scalar.
12 """
13
14 # lower bound if any
15 lower: Optional[float] = None
16
17 # upper bound if any
18 upper: Optional[float] = None
19
20 # initial value
21 # default to the middle point if completely bounded
22 init: Optional[float] = None
23
24 # step size for an update
25 # defaults to 1 if unbounded
26 # or 1/6 of the range if completely bounded
27 step: Optional[float] = None
28
29 # cast to integer
30 integer: bool = False
31
32 # logarithmically distributed
33 log: bool = False
34
35
36 @dataclass
37 class OptimConf:
38
39 # name of the Nevergrad optimizer to use. Here is a sample:
40 # - "OnePlusOne" extremely simple and robust, especially at low budget, but
41 # tends to converge early.
42 # - "CMA" very good algorithm, but may require a significant budget (> 120)
43 # - "TwoPointsDE": an algorithm good in a wide range of settings, for significant
44 # budgets (> 120).
45 # - "NGOpt" an algorithm aiming at identifying the best optimizer given your input
46 # definition (updated regularly)
47 # find out more within nevergrad's documentation:
48 # https://github.com/facebookresearch/nevergrad/
49 optimizer: str = "NGOpt"
50
51 # total number of function evaluations to perform
52 budget: int = 80
53
54 # number of parallel workers for performing function evaluations
55 num_workers: int = 10
56
57 # set to true if the function evaluations are noisy
58 noisy: bool = False
59
60 # set to true for performing maximization instead of minimization
61 maximize: bool = False
62
63 # optimization seed, for reproducibility
64 seed: Optional[int] = None
65
66
67 @dataclass
68 class NevergradSweeperConf:
69 _target_: str = (
70 "hydra_plugins.hydra_nevergrad_sweeper.nevergrad_sweeper.NevergradSweeper"
71 )
72
73 # configuration of the optimizer
74 optim: OptimConf = OptimConf()
75
76 # default parametrization of the search space
77 # can be specified:
78 # - as a string, like commandline arguments
79 # - as a list, for categorical variables
80 # - as a full scalar specification
81 parametrization: Dict[str, Any] = field(default_factory=dict)
82
83
84 ConfigStore.instance().store(
85 group="hydra/sweeper",
86 name="nevergrad",
87 node=NevergradSweeperConf,
88 provider="nevergrad",
89 )
90
```
Path: `plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from typing import (
4 Any,
5 Dict,
6 List,
7 MutableMapping,
8 MutableSequence,
9 Optional,
10 Tuple,
11 Union,
12 )
13
14 import nevergrad as ng
15 from hydra.core.override_parser.overrides_parser import OverridesParser
16 from hydra.core.override_parser.types import (
17 ChoiceSweep,
18 IntervalSweep,
19 Override,
20 Transformer,
21 )
22 from hydra.core.plugins import Plugins
23 from hydra.plugins.launcher import Launcher
24 from hydra.plugins.sweeper import Sweeper
25 from hydra.types import HydraContext, TaskFunction
26 from omegaconf import DictConfig, ListConfig, OmegaConf
27
28 from .config import OptimConf, ScalarConfigSpec
29
30 log = logging.getLogger(__name__)
31
32
33 def create_nevergrad_param_from_config(
34 config: Union[MutableSequence[Any], MutableMapping[str, Any]]
35 ) -> Any:
36 if isinstance(config, MutableSequence):
37 if isinstance(config, ListConfig):
38 config = OmegaConf.to_container(config, resolve=True) # type: ignore
39 return ng.p.Choice(config)
40 if isinstance(config, MutableMapping):
41 specs = ScalarConfigSpec(**config)
42 init = ["init", "lower", "upper"]
43 init_params = {x: getattr(specs, x) for x in init}
44 if not specs.log:
45 scalar = ng.p.Scalar(**init_params)
46 if specs.step is not None:
47 scalar.set_mutation(sigma=specs.step)
48 else:
49 if specs.step is not None:
50 init_params["exponent"] = specs.step
51 scalar = ng.p.Log(**init_params)
52 if specs.integer:
53 scalar.set_integer_casting()
54 return scalar
55 return config
56
57
58 def create_nevergrad_parameter_from_override(override: Override) -> Any:
59 val = override.value()
60 if not override.is_sweep_override():
61 return val
62 if override.is_choice_sweep():
63 assert isinstance(val, ChoiceSweep)
64 vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]
65 if "ordered" in val.tags:
66 return ng.p.TransitionChoice(vals)
67 else:
68 return ng.p.Choice(vals)
69 elif override.is_range_sweep():
70 vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]
71 return ng.p.Choice(vals)
72 elif override.is_interval_sweep():
73 assert isinstance(val, IntervalSweep)
74 if "log" in val.tags:
75 scalar = ng.p.Log(lower=val.start, upper=val.end)
76 else:
77 scalar = ng.p.Scalar(lower=val.start, upper=val.end) # type: ignore
78 if isinstance(val.start, int):
79 scalar.set_integer_casting()
80 return scalar
81
82
83 class NevergradSweeperImpl(Sweeper):
84 def __init__(
85 self,
86 optim: OptimConf,
87 parametrization: Optional[DictConfig],
88 ):
89 self.opt_config = optim
90 self.config: Optional[DictConfig] = None
91 self.launcher: Optional[Launcher] = None
92 self.hydra_context: Optional[HydraContext] = None
93 self.job_results = None
94 self.parametrization: Dict[str, Any] = {}
95 if parametrization is not None:
96 assert isinstance(parametrization, DictConfig)
97 self.parametrization = {
98 str(x): create_nevergrad_param_from_config(y)
99 for x, y in parametrization.items()
100 }
101 self.job_idx: Optional[int] = None
102
103 def setup(
104 self,
105 *,
106 hydra_context: HydraContext,
107 task_function: TaskFunction,
108 config: DictConfig,
109 ) -> None:
110 self.job_idx = 0
111 self.config = config
112 self.hydra_context = hydra_context
113 self.launcher = Plugins.instance().instantiate_launcher(
114 hydra_context=hydra_context, task_function=task_function, config=config
115 )
116
117 def sweep(self, arguments: List[str]) -> None:
118
119 assert self.config is not None
120 assert self.launcher is not None
121 assert self.job_idx is not None
122 direction = -1 if self.opt_config.maximize else 1
123 name = "maximization" if self.opt_config.maximize else "minimization"
124 # Override the parametrization from commandline
125 params = dict(self.parametrization)
126
127 parser = OverridesParser.create()
128 parsed = parser.parse_overrides(arguments)
129
130 for override in parsed:
131 params[
132 override.get_key_element()
133 ] = create_nevergrad_parameter_from_override(override)
134
135 parametrization = ng.p.Dict(**params)
136 parametrization.function.deterministic = not self.opt_config.noisy
137 parametrization.random_state.seed(self.opt_config.seed)
138 # log and build the optimizer
139 opt = self.opt_config.optimizer
140 remaining_budget = self.opt_config.budget
141 nw = self.opt_config.num_workers
142 log.info(
143 f"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, "
144 f"num_workers={nw}) {name}"
145 )
146 log.info(f"with parametrization {parametrization}")
147 log.info(f"Sweep output dir: {self.config.hydra.sweep.dir}")
148 optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)
149 # loop!
150 all_returns: List[Any] = []
151 best: Tuple[float, ng.p.Parameter] = (float("inf"), parametrization)
152 while remaining_budget > 0:
153 batch = min(nw, remaining_budget)
154 remaining_budget -= batch
155 candidates = [optimizer.ask() for _ in range(batch)]
156 overrides = list(
157 tuple(f"{x}={y}" for x, y in c.value.items()) for c in candidates
158 )
159 self.validate_batch_is_legal(overrides)
160 returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)
161 self.job_idx += len(returns)
162 # would have been nice to avoid waiting for all jobs to finish
163 # aka batch size Vs steady state (launching a new job whenever one is done)
164 for cand, ret in zip(candidates, returns):
165 loss = direction * ret.return_value
166 optimizer.tell(cand, loss)
167 if loss < best[0]:
168 best = (loss, cand)
169 all_returns.extend(returns)
170 recom = optimizer.provide_recommendation()
171 results_to_serialize = {
172 "name": "nevergrad",
173 "best_evaluated_params": best[1].value,
174 "best_evaluated_result": direction * best[0],
175 }
176 OmegaConf.save(
177 OmegaConf.create(results_to_serialize),
178 f"{self.config.hydra.sweep.dir}/optimization_results.yaml",
179 )
180 log.info(
181 "Best parameters: %s", " ".join(f"{x}={y}" for x, y in recom.value.items())
182 )
183
```
Path: `plugins/hydra_nevergrad_sweeper/example/my_app.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3
4 import hydra
5 from omegaconf import DictConfig
6
7 log = logging.getLogger(__name__)
8
9
10 @hydra.main(config_path=".", config_name="config")
11 def dummy_training(cfg: DictConfig) -> float:
12 """A dummy function to minimize
13 Minimum is 0.0 at:
14 lr = 0.12, dropout=0.33, db=mnist, batch_size=4
15 """
16 do = cfg.dropout
17 bs = cfg.batch_size
18 out = float(
19 abs(do - 0.33) + int(cfg.db == "mnist") + abs(cfg.lr - 0.12) + abs(bs - 4)
20 )
21 log.info(
22 f"dummy_training(dropout={do:.3f}, lr={cfg.lr:.3f}, db={cfg.db}, batch_size={bs}) = {out:.3f}",
23 )
24 return out
25
26
27 if __name__ == "__main__":
28 dummy_training()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/hydra_nevergrad_sweeper/example/my_app.py b/plugins/hydra_nevergrad_sweeper/example/my_app.py
--- a/plugins/hydra_nevergrad_sweeper/example/my_app.py
+++ b/plugins/hydra_nevergrad_sweeper/example/my_app.py
@@ -21,6 +21,8 @@
log.info(
f"dummy_training(dropout={do:.3f}, lr={cfg.lr:.3f}, db={cfg.db}, batch_size={bs}) = {out:.3f}",
)
+ if cfg.error:
+ raise RuntimeError("cfg.error is True")
return out
diff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py
--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py
+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py
@@ -1,5 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
+import math
from typing import (
Any,
Dict,
@@ -12,6 +13,7 @@
)
import nevergrad as ng
+from hydra.core import utils
from hydra.core.override_parser.overrides_parser import OverridesParser
from hydra.core.override_parser.types import (
ChoiceSweep,
@@ -158,14 +160,32 @@
)
self.validate_batch_is_legal(overrides)
returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)
- self.job_idx += len(returns)
# would have been nice to avoid waiting for all jobs to finish
# aka batch size Vs steady state (launching a new job whenever one is done)
+ self.job_idx += len(returns)
+ # check job status and prepare losses
+ failures = 0
for cand, ret in zip(candidates, returns):
- loss = direction * ret.return_value
- optimizer.tell(cand, loss)
- if loss < best[0]:
- best = (loss, cand)
+ if ret.status == utils.JobStatus.COMPLETED:
+ rectified_loss = direction * ret.return_value
+ else:
+ rectified_loss = math.inf
+ failures += 1
+ try:
+ ret.return_value
+ except Exception as e:
+ log.warning(f"Returning infinity for failed experiment: {e}")
+ optimizer.tell(cand, rectified_loss)
+ if rectified_loss < best[0]:
+ best = (rectified_loss, cand)
+ # raise if too many failures
+ if failures / len(returns) > self.opt_config.max_failure_rate:
+ log.error(
+ f"Failed {failures} times out of {len(returns)} "
+ f"with max_failure_rate={self.opt_config.max_failure_rate}"
+ )
+ for ret in returns:
+ ret.return_value # delegate raising to JobReturn, with actual traceback
all_returns.extend(returns)
recom = optimizer.provide_recommendation()
results_to_serialize = {
diff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py
--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py
+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py
@@ -63,6 +63,9 @@
# optimization seed, for reproducibility
seed: Optional[int] = None
+ # maximum authorized failure rate for a batch of parameters
+ max_failure_rate: float = 0.0
+
@dataclass
class NevergradSweeperConf:
| {"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/example/my_app.py b/plugins/hydra_nevergrad_sweeper/example/my_app.py\n--- a/plugins/hydra_nevergrad_sweeper/example/my_app.py\n+++ b/plugins/hydra_nevergrad_sweeper/example/my_app.py\n@@ -21,6 +21,8 @@\n log.info(\n f\"dummy_training(dropout={do:.3f}, lr={cfg.lr:.3f}, db={cfg.db}, batch_size={bs}) = {out:.3f}\",\n )\n+ if cfg.error:\n+ raise RuntimeError(\"cfg.error is True\")\n return out\n \n \ndiff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py\n--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py\n+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py\n@@ -1,5 +1,6 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import logging\n+import math\n from typing import (\n Any,\n Dict,\n@@ -12,6 +13,7 @@\n )\n \n import nevergrad as ng\n+from hydra.core import utils\n from hydra.core.override_parser.overrides_parser import OverridesParser\n from hydra.core.override_parser.types import (\n ChoiceSweep,\n@@ -158,14 +160,32 @@\n )\n self.validate_batch_is_legal(overrides)\n returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)\n- self.job_idx += len(returns)\n # would have been nice to avoid waiting for all jobs to finish\n # aka batch size Vs steady state (launching a new job whenever one is done)\n+ self.job_idx += len(returns)\n+ # check job status and prepare losses\n+ failures = 0\n for cand, ret in zip(candidates, returns):\n- loss = direction * ret.return_value\n- optimizer.tell(cand, loss)\n- if loss < best[0]:\n- best = (loss, cand)\n+ if ret.status == utils.JobStatus.COMPLETED:\n+ rectified_loss = direction * ret.return_value\n+ else:\n+ rectified_loss = math.inf\n+ failures += 1\n+ try:\n+ ret.return_value\n+ except Exception as e:\n+ log.warning(f\"Returning infinity for failed experiment: {e}\")\n+ optimizer.tell(cand, rectified_loss)\n+ if rectified_loss < best[0]:\n+ best = (rectified_loss, cand)\n+ # raise if too many failures\n+ if failures / len(returns) > self.opt_config.max_failure_rate:\n+ log.error(\n+ f\"Failed {failures} times out of {len(returns)} \"\n+ f\"with max_failure_rate={self.opt_config.max_failure_rate}\"\n+ )\n+ for ret in returns:\n+ ret.return_value # delegate raising to JobReturn, with actual traceback\n all_returns.extend(returns)\n recom = optimizer.provide_recommendation()\n results_to_serialize = {\ndiff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py\n--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py\n+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py\n@@ -63,6 +63,9 @@\n # optimization seed, for reproducibility\n seed: Optional[int] = None\n \n+ # maximum authorized failure rate for a batch of parameters\n+ max_failure_rate: float = 0.0\n+\n \n @dataclass\n class NevergradSweeperConf:\n", "issue": "Support multirun partial failure in Nevergrad sweeper\nContext here: https://github.com/facebookresearch/hydra/issues/1377\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass ScalarConfigSpec:\n \"\"\"Representation of all the options to define\n a scalar.\n \"\"\"\n\n # lower bound if any\n lower: Optional[float] = None\n\n # upper bound if any\n upper: Optional[float] = None\n\n # initial value\n # default to the middle point if completely bounded\n init: Optional[float] = None\n\n # step size for an update\n # defaults to 1 if unbounded\n # or 1/6 of the range if completely bounded\n step: Optional[float] = None\n\n # cast to integer\n integer: bool = False\n\n # logarithmically distributed\n log: bool = False\n\n\n@dataclass\nclass OptimConf:\n\n # name of the Nevergrad optimizer to use. Here is a sample:\n # - \"OnePlusOne\" extremely simple and robust, especially at low budget, but\n # tends to converge early.\n # - \"CMA\" very good algorithm, but may require a significant budget (> 120)\n # - \"TwoPointsDE\": an algorithm good in a wide range of settings, for significant\n # budgets (> 120).\n # - \"NGOpt\" an algorithm aiming at identifying the best optimizer given your input\n # definition (updated regularly)\n # find out more within nevergrad's documentation:\n # https://github.com/facebookresearch/nevergrad/\n optimizer: str = \"NGOpt\"\n\n # total number of function evaluations to perform\n budget: int = 80\n\n # number of parallel workers for performing function evaluations\n num_workers: int = 10\n\n # set to true if the function evaluations are noisy\n noisy: bool = False\n\n # set to true for performing maximization instead of minimization\n maximize: bool = False\n\n # optimization seed, for reproducibility\n seed: Optional[int] = None\n\n\n@dataclass\nclass NevergradSweeperConf:\n _target_: str = (\n \"hydra_plugins.hydra_nevergrad_sweeper.nevergrad_sweeper.NevergradSweeper\"\n )\n\n # configuration of the optimizer\n optim: OptimConf = OptimConf()\n\n # default parametrization of the search space\n # can be specified:\n # - as a string, like commandline arguments\n # - as a list, for categorical variables\n # - as a full scalar specification\n parametrization: Dict[str, Any] = field(default_factory=dict)\n\n\nConfigStore.instance().store(\n group=\"hydra/sweeper\",\n name=\"nevergrad\",\n node=NevergradSweeperConf,\n provider=\"nevergrad\",\n)\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nfrom typing import (\n Any,\n Dict,\n List,\n MutableMapping,\n MutableSequence,\n Optional,\n Tuple,\n Union,\n)\n\nimport nevergrad as ng\nfrom hydra.core.override_parser.overrides_parser import OverridesParser\nfrom hydra.core.override_parser.types import (\n ChoiceSweep,\n IntervalSweep,\n Override,\n Transformer,\n)\nfrom hydra.core.plugins import Plugins\nfrom hydra.plugins.launcher import Launcher\nfrom hydra.plugins.sweeper import Sweeper\nfrom hydra.types import HydraContext, TaskFunction\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\n\nfrom .config import OptimConf, ScalarConfigSpec\n\nlog = logging.getLogger(__name__)\n\n\ndef create_nevergrad_param_from_config(\n config: Union[MutableSequence[Any], MutableMapping[str, Any]]\n) -> Any:\n if isinstance(config, MutableSequence):\n if isinstance(config, ListConfig):\n config = OmegaConf.to_container(config, resolve=True) # type: ignore\n return ng.p.Choice(config)\n if isinstance(config, MutableMapping):\n specs = ScalarConfigSpec(**config)\n init = [\"init\", \"lower\", \"upper\"]\n init_params = {x: getattr(specs, x) for x in init}\n if not specs.log:\n scalar = ng.p.Scalar(**init_params)\n if specs.step is not None:\n scalar.set_mutation(sigma=specs.step)\n else:\n if specs.step is not None:\n init_params[\"exponent\"] = specs.step\n scalar = ng.p.Log(**init_params)\n if specs.integer:\n scalar.set_integer_casting()\n return scalar\n return config\n\n\ndef create_nevergrad_parameter_from_override(override: Override) -> Any:\n val = override.value()\n if not override.is_sweep_override():\n return val\n if override.is_choice_sweep():\n assert isinstance(val, ChoiceSweep)\n vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]\n if \"ordered\" in val.tags:\n return ng.p.TransitionChoice(vals)\n else:\n return ng.p.Choice(vals)\n elif override.is_range_sweep():\n vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]\n return ng.p.Choice(vals)\n elif override.is_interval_sweep():\n assert isinstance(val, IntervalSweep)\n if \"log\" in val.tags:\n scalar = ng.p.Log(lower=val.start, upper=val.end)\n else:\n scalar = ng.p.Scalar(lower=val.start, upper=val.end) # type: ignore\n if isinstance(val.start, int):\n scalar.set_integer_casting()\n return scalar\n\n\nclass NevergradSweeperImpl(Sweeper):\n def __init__(\n self,\n optim: OptimConf,\n parametrization: Optional[DictConfig],\n ):\n self.opt_config = optim\n self.config: Optional[DictConfig] = None\n self.launcher: Optional[Launcher] = None\n self.hydra_context: Optional[HydraContext] = None\n self.job_results = None\n self.parametrization: Dict[str, Any] = {}\n if parametrization is not None:\n assert isinstance(parametrization, DictConfig)\n self.parametrization = {\n str(x): create_nevergrad_param_from_config(y)\n for x, y in parametrization.items()\n }\n self.job_idx: Optional[int] = None\n\n def setup(\n self,\n *,\n hydra_context: HydraContext,\n task_function: TaskFunction,\n config: DictConfig,\n ) -> None:\n self.job_idx = 0\n self.config = config\n self.hydra_context = hydra_context\n self.launcher = Plugins.instance().instantiate_launcher(\n hydra_context=hydra_context, task_function=task_function, config=config\n )\n\n def sweep(self, arguments: List[str]) -> None:\n\n assert self.config is not None\n assert self.launcher is not None\n assert self.job_idx is not None\n direction = -1 if self.opt_config.maximize else 1\n name = \"maximization\" if self.opt_config.maximize else 
\"minimization\"\n # Override the parametrization from commandline\n params = dict(self.parametrization)\n\n parser = OverridesParser.create()\n parsed = parser.parse_overrides(arguments)\n\n for override in parsed:\n params[\n override.get_key_element()\n ] = create_nevergrad_parameter_from_override(override)\n\n parametrization = ng.p.Dict(**params)\n parametrization.function.deterministic = not self.opt_config.noisy\n parametrization.random_state.seed(self.opt_config.seed)\n # log and build the optimizer\n opt = self.opt_config.optimizer\n remaining_budget = self.opt_config.budget\n nw = self.opt_config.num_workers\n log.info(\n f\"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, \"\n f\"num_workers={nw}) {name}\"\n )\n log.info(f\"with parametrization {parametrization}\")\n log.info(f\"Sweep output dir: {self.config.hydra.sweep.dir}\")\n optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)\n # loop!\n all_returns: List[Any] = []\n best: Tuple[float, ng.p.Parameter] = (float(\"inf\"), parametrization)\n while remaining_budget > 0:\n batch = min(nw, remaining_budget)\n remaining_budget -= batch\n candidates = [optimizer.ask() for _ in range(batch)]\n overrides = list(\n tuple(f\"{x}={y}\" for x, y in c.value.items()) for c in candidates\n )\n self.validate_batch_is_legal(overrides)\n returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)\n self.job_idx += len(returns)\n # would have been nice to avoid waiting for all jobs to finish\n # aka batch size Vs steady state (launching a new job whenever one is done)\n for cand, ret in zip(candidates, returns):\n loss = direction * ret.return_value\n optimizer.tell(cand, loss)\n if loss < best[0]:\n best = (loss, cand)\n all_returns.extend(returns)\n recom = optimizer.provide_recommendation()\n results_to_serialize = {\n \"name\": \"nevergrad\",\n \"best_evaluated_params\": best[1].value,\n \"best_evaluated_result\": direction * best[0],\n }\n OmegaConf.save(\n OmegaConf.create(results_to_serialize),\n f\"{self.config.hydra.sweep.dir}/optimization_results.yaml\",\n )\n log.info(\n \"Best parameters: %s\", \" \".join(f\"{x}={y}\" for x, y in recom.value.items())\n )\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\n\nimport hydra\nfrom omegaconf import DictConfig\n\nlog = logging.getLogger(__name__)\n\n\[email protected](config_path=\".\", config_name=\"config\")\ndef dummy_training(cfg: DictConfig) -> float:\n \"\"\"A dummy function to minimize\n Minimum is 0.0 at:\n lr = 0.12, dropout=0.33, db=mnist, batch_size=4\n \"\"\"\n do = cfg.dropout\n bs = cfg.batch_size\n out = float(\n abs(do - 0.33) + int(cfg.db == \"mnist\") + abs(cfg.lr - 0.12) + abs(bs - 4)\n )\n log.info(\n f\"dummy_training(dropout={do:.3f}, lr={cfg.lr:.3f}, db={cfg.db}, batch_size={bs}) = {out:.3f}\",\n )\n return out\n\n\nif __name__ == \"__main__\":\n dummy_training()\n", "path": "plugins/hydra_nevergrad_sweeper/example/my_app.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass ScalarConfigSpec:\n \"\"\"Representation of all the options to define\n a scalar.\n \"\"\"\n\n # lower bound if any\n lower: Optional[float] = None\n\n # upper bound if any\n upper: Optional[float] = None\n\n # initial value\n # default to the middle point if completely bounded\n init: Optional[float] = None\n\n # step size for an update\n # defaults to 1 if unbounded\n # or 1/6 of the range if completely bounded\n step: Optional[float] = None\n\n # cast to integer\n integer: bool = False\n\n # logarithmically distributed\n log: bool = False\n\n\n@dataclass\nclass OptimConf:\n\n # name of the Nevergrad optimizer to use. Here is a sample:\n # - \"OnePlusOne\" extremely simple and robust, especially at low budget, but\n # tends to converge early.\n # - \"CMA\" very good algorithm, but may require a significant budget (> 120)\n # - \"TwoPointsDE\": an algorithm good in a wide range of settings, for significant\n # budgets (> 120).\n # - \"NGOpt\" an algorithm aiming at identifying the best optimizer given your input\n # definition (updated regularly)\n # find out more within nevergrad's documentation:\n # https://github.com/facebookresearch/nevergrad/\n optimizer: str = \"NGOpt\"\n\n # total number of function evaluations to perform\n budget: int = 80\n\n # number of parallel workers for performing function evaluations\n num_workers: int = 10\n\n # set to true if the function evaluations are noisy\n noisy: bool = False\n\n # set to true for performing maximization instead of minimization\n maximize: bool = False\n\n # optimization seed, for reproducibility\n seed: Optional[int] = None\n\n # maximum authorized failure rate for a batch of parameters\n max_failure_rate: float = 0.0\n\n\n@dataclass\nclass NevergradSweeperConf:\n _target_: str = (\n \"hydra_plugins.hydra_nevergrad_sweeper.nevergrad_sweeper.NevergradSweeper\"\n )\n\n # configuration of the optimizer\n optim: OptimConf = OptimConf()\n\n # default parametrization of the search space\n # can be specified:\n # - as a string, like commandline arguments\n # - as a list, for categorical variables\n # - as a full scalar specification\n parametrization: Dict[str, Any] = field(default_factory=dict)\n\n\nConfigStore.instance().store(\n group=\"hydra/sweeper\",\n name=\"nevergrad\",\n node=NevergradSweeperConf,\n provider=\"nevergrad\",\n)\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport math\nfrom typing import (\n Any,\n Dict,\n List,\n MutableMapping,\n MutableSequence,\n Optional,\n Tuple,\n Union,\n)\n\nimport nevergrad as ng\nfrom hydra.core import utils\nfrom hydra.core.override_parser.overrides_parser import OverridesParser\nfrom hydra.core.override_parser.types import (\n ChoiceSweep,\n IntervalSweep,\n Override,\n Transformer,\n)\nfrom hydra.core.plugins import Plugins\nfrom hydra.plugins.launcher import Launcher\nfrom hydra.plugins.sweeper import Sweeper\nfrom hydra.types import HydraContext, TaskFunction\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\n\nfrom .config import OptimConf, ScalarConfigSpec\n\nlog = logging.getLogger(__name__)\n\n\ndef create_nevergrad_param_from_config(\n config: Union[MutableSequence[Any], MutableMapping[str, Any]]\n) -> Any:\n if isinstance(config, MutableSequence):\n if isinstance(config, ListConfig):\n config = OmegaConf.to_container(config, resolve=True) # type: ignore\n return ng.p.Choice(config)\n if isinstance(config, MutableMapping):\n specs = ScalarConfigSpec(**config)\n init = [\"init\", \"lower\", \"upper\"]\n init_params = {x: getattr(specs, x) for x in init}\n if not specs.log:\n scalar = ng.p.Scalar(**init_params)\n if specs.step is not None:\n scalar.set_mutation(sigma=specs.step)\n else:\n if specs.step is not None:\n init_params[\"exponent\"] = specs.step\n scalar = ng.p.Log(**init_params)\n if specs.integer:\n scalar.set_integer_casting()\n return scalar\n return config\n\n\ndef create_nevergrad_parameter_from_override(override: Override) -> Any:\n val = override.value()\n if not override.is_sweep_override():\n return val\n if override.is_choice_sweep():\n assert isinstance(val, ChoiceSweep)\n vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]\n if \"ordered\" in val.tags:\n return ng.p.TransitionChoice(vals)\n else:\n return ng.p.Choice(vals)\n elif override.is_range_sweep():\n vals = [x for x in override.sweep_iterator(transformer=Transformer.encode)]\n return ng.p.Choice(vals)\n elif override.is_interval_sweep():\n assert isinstance(val, IntervalSweep)\n if \"log\" in val.tags:\n scalar = ng.p.Log(lower=val.start, upper=val.end)\n else:\n scalar = ng.p.Scalar(lower=val.start, upper=val.end) # type: ignore\n if isinstance(val.start, int):\n scalar.set_integer_casting()\n return scalar\n\n\nclass NevergradSweeperImpl(Sweeper):\n def __init__(\n self,\n optim: OptimConf,\n parametrization: Optional[DictConfig],\n ):\n self.opt_config = optim\n self.config: Optional[DictConfig] = None\n self.launcher: Optional[Launcher] = None\n self.hydra_context: Optional[HydraContext] = None\n self.job_results = None\n self.parametrization: Dict[str, Any] = {}\n if parametrization is not None:\n assert isinstance(parametrization, DictConfig)\n self.parametrization = {\n str(x): create_nevergrad_param_from_config(y)\n for x, y in parametrization.items()\n }\n self.job_idx: Optional[int] = None\n\n def setup(\n self,\n *,\n hydra_context: HydraContext,\n task_function: TaskFunction,\n config: DictConfig,\n ) -> None:\n self.job_idx = 0\n self.config = config\n self.hydra_context = hydra_context\n self.launcher = Plugins.instance().instantiate_launcher(\n hydra_context=hydra_context, task_function=task_function, config=config\n )\n\n def sweep(self, arguments: List[str]) -> None:\n\n assert self.config is not None\n assert self.launcher is not None\n assert self.job_idx is not None\n direction = -1 if self.opt_config.maximize else 1\n name = 
\"maximization\" if self.opt_config.maximize else \"minimization\"\n # Override the parametrization from commandline\n params = dict(self.parametrization)\n\n parser = OverridesParser.create()\n parsed = parser.parse_overrides(arguments)\n\n for override in parsed:\n params[\n override.get_key_element()\n ] = create_nevergrad_parameter_from_override(override)\n\n parametrization = ng.p.Dict(**params)\n parametrization.function.deterministic = not self.opt_config.noisy\n parametrization.random_state.seed(self.opt_config.seed)\n # log and build the optimizer\n opt = self.opt_config.optimizer\n remaining_budget = self.opt_config.budget\n nw = self.opt_config.num_workers\n log.info(\n f\"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, \"\n f\"num_workers={nw}) {name}\"\n )\n log.info(f\"with parametrization {parametrization}\")\n log.info(f\"Sweep output dir: {self.config.hydra.sweep.dir}\")\n optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)\n # loop!\n all_returns: List[Any] = []\n best: Tuple[float, ng.p.Parameter] = (float(\"inf\"), parametrization)\n while remaining_budget > 0:\n batch = min(nw, remaining_budget)\n remaining_budget -= batch\n candidates = [optimizer.ask() for _ in range(batch)]\n overrides = list(\n tuple(f\"{x}={y}\" for x, y in c.value.items()) for c in candidates\n )\n self.validate_batch_is_legal(overrides)\n returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)\n # would have been nice to avoid waiting for all jobs to finish\n # aka batch size Vs steady state (launching a new job whenever one is done)\n self.job_idx += len(returns)\n # check job status and prepare losses\n failures = 0\n for cand, ret in zip(candidates, returns):\n if ret.status == utils.JobStatus.COMPLETED:\n rectified_loss = direction * ret.return_value\n else:\n rectified_loss = math.inf\n failures += 1\n try:\n ret.return_value\n except Exception as e:\n log.warning(f\"Returning infinity for failed experiment: {e}\")\n optimizer.tell(cand, rectified_loss)\n if rectified_loss < best[0]:\n best = (rectified_loss, cand)\n # raise if too many failures\n if failures / len(returns) > self.opt_config.max_failure_rate:\n log.error(\n f\"Failed {failures} times out of {len(returns)} \"\n f\"with max_failure_rate={self.opt_config.max_failure_rate}\"\n )\n for ret in returns:\n ret.return_value # delegate raising to JobReturn, with actual traceback\n all_returns.extend(returns)\n recom = optimizer.provide_recommendation()\n results_to_serialize = {\n \"name\": \"nevergrad\",\n \"best_evaluated_params\": best[1].value,\n \"best_evaluated_result\": direction * best[0],\n }\n OmegaConf.save(\n OmegaConf.create(results_to_serialize),\n f\"{self.config.hydra.sweep.dir}/optimization_results.yaml\",\n )\n log.info(\n \"Best parameters: %s\", \" \".join(f\"{x}={y}\" for x, y in recom.value.items())\n )\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/_impl.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\n\nimport hydra\nfrom omegaconf import DictConfig\n\nlog = logging.getLogger(__name__)\n\n\[email protected](config_path=\".\", config_name=\"config\")\ndef dummy_training(cfg: DictConfig) -> float:\n \"\"\"A dummy function to minimize\n Minimum is 0.0 at:\n lr = 0.12, dropout=0.33, db=mnist, batch_size=4\n \"\"\"\n do = cfg.dropout\n bs = cfg.batch_size\n out = float(\n abs(do - 0.33) + int(cfg.db == \"mnist\") + abs(cfg.lr - 0.12) + abs(bs - 4)\n )\n log.info(\n f\"dummy_training(dropout={do:.3f}, lr={cfg.lr:.3f}, db={cfg.db}, batch_size={bs}) = {out:.3f}\",\n )\n if cfg.error:\n raise RuntimeError(\"cfg.error is True\")\n return out\n\n\nif __name__ == \"__main__\":\n dummy_training()\n", "path": "plugins/hydra_nevergrad_sweeper/example/my_app.py"}]} | 3,404 | 930 |
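The reference patch above boils down to one idea: treat a failed job as an infinitely bad loss, and abort only when too many jobs in a batch fail. Below is a minimal sketch of that logic in isolation; `FakeReturn` and its `completed` flag are stand-ins invented here for illustration, not Hydra's actual `JobReturn`/`JobStatus` API.

```python
import math

class FakeReturn:
    """Illustrative stand-in for a launched job's result (not Hydra's JobReturn)."""
    def __init__(self, completed, value=None):
        self.completed = completed
        self.value = value

def rectified_losses(returns, direction=1, max_failure_rate=0.0):
    losses, failures = [], 0
    for ret in returns:
        if ret.completed:
            losses.append(direction * ret.value)
        else:
            failures += 1
            losses.append(math.inf)  # report a failed job as an infinitely bad loss
    if failures / len(returns) > max_failure_rate:
        raise RuntimeError(f"{failures}/{len(returns)} jobs failed")
    return losses

print(rectified_losses([FakeReturn(True, 0.5), FakeReturn(False), FakeReturn(True, 1.2)],
                       max_failure_rate=0.5))  # [0.5, inf, 1.2]
```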
gh_patches_debug_31784 | rasdani/github-patches | git_diff | opentensor__bittensor-1231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
new_hotkey is listed twice under 'btcli --help' menu
new_hotkey is listed twice under 'btcli --help' menu
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bittensor/_cli/__init__.py`
Content:
```
1 """
2 Create and init the CLI class, which handles the coldkey, hotkey and money transfer
3 """
4 # The MIT License (MIT)
5 # Copyright © 2021 Yuma Rao
6 # Copyright © 2022 Opentensor Foundation
7
8 # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
9 # documentation files (the “Software”), to deal in the Software without restriction, including without limitation
10 # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
11 # and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
12
13 # The above copyright notice and this permission notice shall be included in all copies or substantial portions of
14 # the Software.
15
16 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
17 # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 import sys
23 import argparse
24 import bittensor
25 from . import cli_impl
26 from .commands import *
27 from typing import List, Optional
28 from .naka_cli_impl import CLI as naka_CLI
29 console = bittensor.__console__
30
31 # Turn off rich console locals trace.
32 from rich.traceback import install
33 install(show_locals=False)
34
35 class cli:
36 """
37 Create and init the CLI class, which handles the coldkey, hotkey and tao transfer
38 """
39 def __new__(
40 cls,
41 config: Optional['bittensor.Config'] = None,
42 args: Optional[List[str]] = None,
43 ) -> 'bittensor.CLI':
44 r""" Creates a new bittensor.cli from passed arguments.
45 Args:
46 config (:obj:`bittensor.Config`, `optional`):
47 bittensor.cli.config()
48 args (`List[str]`, `optional`):
49 The arguments to parse from the command line.
50 """
51 if config == None:
52 config = cli.config(args)
53 cli.check_config( config )
54 if config.subtensor:
55 network = config.subtensor.get('network', bittensor.defaults.subtensor.network)
56
57 if network == 'nakamoto':
58 # Use nakamoto version of the CLI
59 return naka_CLI(config=config)
60 else:
61 return cli_impl.CLI( config = config)
62
63 @staticmethod
64 def config(args: List[str]) -> 'bittensor.config':
65 """ From the argument parser, add config to bittensor.executor and local config
66 Return: bittensor.config object
67 """
68 parser = argparse.ArgumentParser(
69 description=f"bittensor cli v{bittensor.__version__}",
70 usage="btcli <command> <command args>",
71 add_help=True)
72
73 cmd_parsers = parser.add_subparsers(dest='command')
74 RunCommand.add_args( cmd_parsers )
75 HelpCommand.add_args( cmd_parsers )
76 ListCommand.add_args( cmd_parsers )
77 QueryCommand.add_args( cmd_parsers )
78 StakeCommand.add_args( cmd_parsers )
79 UpdateCommand.add_args( cmd_parsers )
80 InspectCommand.add_args( cmd_parsers )
81 WeightsCommand.add_args( cmd_parsers )
82 UnStakeCommand.add_args( cmd_parsers )
83 OverviewCommand.add_args( cmd_parsers )
84 RegisterCommand.add_args( cmd_parsers )
85 TransferCommand.add_args( cmd_parsers )
86 NominateCommand.add_args( cmd_parsers )
87 NewHotkeyCommand.add_args( cmd_parsers )
88 MetagraphCommand.add_args( cmd_parsers )
89 SetWeightsCommand.add_args( cmd_parsers )
90 NewColdkeyCommand.add_args( cmd_parsers )
91 NewHotkeyCommand.add_args( cmd_parsers )
92 MyDelegatesCommand.add_args( cmd_parsers )
93 ListSubnetsCommand.add_args( cmd_parsers )
94 RegenHotkeyCommand.add_args( cmd_parsers )
95 RegenColdkeyCommand.add_args( cmd_parsers )
96 DelegateStakeCommand.add_args( cmd_parsers )
97 DelegateUnstakeCommand.add_args( cmd_parsers )
98 ListDelegatesCommand.add_args( cmd_parsers )
99 RegenColdkeypubCommand.add_args( cmd_parsers )
100 RecycleRegisterCommand.add_args( cmd_parsers )
101
102 # If no arguments are passed, print help text.
103 if len(args) == 0:
104 parser.print_help()
105 sys.exit()
106
107 return bittensor.config( parser, args=args )
108
109 @staticmethod
110 def check_config (config: 'bittensor.Config'):
111 """ Check if the essential config exist under different command
112 """
113 if config.command == "run":
114 RunCommand.check_config( config )
115 elif config.command == "transfer":
116 TransferCommand.check_config( config )
117 elif config.command == "register":
118 RegisterCommand.check_config( config )
119 elif config.command == "unstake":
120 UnStakeCommand.check_config( config )
121 elif config.command == "stake":
122 StakeCommand.check_config( config )
123 elif config.command == "overview":
124 OverviewCommand.check_config( config )
125 elif config.command == "new_coldkey":
126 NewColdkeyCommand.check_config( config )
127 elif config.command == "new_hotkey":
128 NewHotkeyCommand.check_config( config )
129 elif config.command == "regen_coldkey":
130 RegenColdkeyCommand.check_config( config )
131 elif config.command == "regen_coldkeypub":
132 RegenColdkeypubCommand.check_config( config )
133 elif config.command == "regen_hotkey":
134 RegenHotkeyCommand.check_config( config )
135 elif config.command == "metagraph":
136 MetagraphCommand.check_config( config )
137 elif config.command == "weights":
138 WeightsCommand.check_config( config )
139 elif config.command == "set_weights":
140 SetWeightsCommand.check_config( config )
141 elif config.command == "list":
142 ListCommand.check_config( config )
143 elif config.command == "inspect":
144 InspectCommand.check_config( config )
145 elif config.command == "query":
146 QueryCommand.check_config( config )
147 elif config.command == "help":
148 HelpCommand.check_config( config )
149 elif config.command == "update":
150 UpdateCommand.check_config( config )
151 elif config.command == "nominate":
152 NominateCommand.check_config( config )
153 elif config.command == "list_delegates":
154 ListDelegatesCommand.check_config( config )
155 elif config.command == "list_subnets":
156 ListSubnetsCommand.check_config( config )
157 elif config.command == "delegate":
158 DelegateStakeCommand.check_config( config )
159 elif config.command == "undelegate":
160 DelegateUnstakeCommand.check_config( config )
161 elif config.command == "my_delegates":
162 MyDelegatesCommand.check_config( config )
163 elif config.command == "recycle_register":
164 RecycleRegisterCommand.check_config( config )
165 else:
166 console.print(":cross_mark:[red]Unknown command: {}[/red]".format(config.command))
167 sys.exit()
168
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bittensor/_cli/__init__.py b/bittensor/_cli/__init__.py
--- a/bittensor/_cli/__init__.py
+++ b/bittensor/_cli/__init__.py
@@ -59,11 +59,10 @@
return naka_CLI(config=config)
else:
return cli_impl.CLI( config = config)
-
- @staticmethod
- def config(args: List[str]) -> 'bittensor.config':
- """ From the argument parser, add config to bittensor.executor and local config
- Return: bittensor.config object
+
+ @staticmethod
+ def __create_parser__() -> 'argparse.ArgumentParser':
+ """ Creates the argument parser for the bittensor cli.
"""
parser = argparse.ArgumentParser(
description=f"bittensor cli v{bittensor.__version__}",
@@ -88,7 +87,6 @@
MetagraphCommand.add_args( cmd_parsers )
SetWeightsCommand.add_args( cmd_parsers )
NewColdkeyCommand.add_args( cmd_parsers )
- NewHotkeyCommand.add_args( cmd_parsers )
MyDelegatesCommand.add_args( cmd_parsers )
ListSubnetsCommand.add_args( cmd_parsers )
RegenHotkeyCommand.add_args( cmd_parsers )
@@ -99,6 +97,15 @@
RegenColdkeypubCommand.add_args( cmd_parsers )
RecycleRegisterCommand.add_args( cmd_parsers )
+ return parser
+
+ @staticmethod
+ def config(args: List[str]) -> 'bittensor.config':
+ """ From the argument parser, add config to bittensor.executor and local config
+ Return: bittensor.config object
+ """
+ parser = cli.__create_parser__()
+
# If no arguments are passed, print help text.
if len(args) == 0:
parser.print_help()
| {"golden_diff": "diff --git a/bittensor/_cli/__init__.py b/bittensor/_cli/__init__.py\n--- a/bittensor/_cli/__init__.py\n+++ b/bittensor/_cli/__init__.py\n@@ -59,11 +59,10 @@\n return naka_CLI(config=config)\n else:\n return cli_impl.CLI( config = config)\n-\n- @staticmethod \n- def config(args: List[str]) -> 'bittensor.config':\n- \"\"\" From the argument parser, add config to bittensor.executor and local config \n- Return: bittensor.config object\n+ \n+ @staticmethod\n+ def __create_parser__() -> 'argparse.ArgumentParser':\n+ \"\"\" Creates the argument parser for the bittensor cli.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=f\"bittensor cli v{bittensor.__version__}\",\n@@ -88,7 +87,6 @@\n MetagraphCommand.add_args( cmd_parsers )\n SetWeightsCommand.add_args( cmd_parsers )\n NewColdkeyCommand.add_args( cmd_parsers )\n- NewHotkeyCommand.add_args( cmd_parsers )\n MyDelegatesCommand.add_args( cmd_parsers )\n ListSubnetsCommand.add_args( cmd_parsers )\n RegenHotkeyCommand.add_args( cmd_parsers )\n@@ -99,6 +97,15 @@\n RegenColdkeypubCommand.add_args( cmd_parsers )\n RecycleRegisterCommand.add_args( cmd_parsers )\n \n+ return parser\n+\n+ @staticmethod \n+ def config(args: List[str]) -> 'bittensor.config':\n+ \"\"\" From the argument parser, add config to bittensor.executor and local config \n+ Return: bittensor.config object\n+ \"\"\"\n+ parser = cli.__create_parser__()\n+\n # If no arguments are passed, print help text.\n if len(args) == 0:\n parser.print_help()\n", "issue": "new_hotkey is listed twice under 'btcli --help' menu\nnew_hotkey is listed twice under 'btcli --help' menu\n", "before_files": [{"content": "\"\"\"\nCreate and init the CLI class, which handles the coldkey, hotkey and money transfer \n\"\"\"\n# The MIT License (MIT)\n# Copyright \u00a9 2021 Yuma Rao\n# Copyright \u00a9 2022 Opentensor Foundation\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \n# documentation files (the \u201cSoftware\u201d), to deal in the Software without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of \n# the Software.\n\n# THE SOFTWARE IS PROVIDED \u201cAS IS\u201d, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION \n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n# DEALINGS IN THE SOFTWARE.\n\nimport sys\nimport argparse\nimport bittensor\nfrom . 
import cli_impl\nfrom .commands import *\nfrom typing import List, Optional\nfrom .naka_cli_impl import CLI as naka_CLI\nconsole = bittensor.__console__\n\n# Turn off rich console locals trace.\nfrom rich.traceback import install\ninstall(show_locals=False)\n\nclass cli:\n \"\"\"\n Create and init the CLI class, which handles the coldkey, hotkey and tao transfer \n \"\"\"\n def __new__(\n cls,\n config: Optional['bittensor.Config'] = None,\n args: Optional[List[str]] = None, \n ) -> 'bittensor.CLI':\n r\"\"\" Creates a new bittensor.cli from passed arguments.\n Args:\n config (:obj:`bittensor.Config`, `optional`): \n bittensor.cli.config()\n args (`List[str]`, `optional`): \n The arguments to parse from the command line.\n \"\"\"\n if config == None: \n config = cli.config(args)\n cli.check_config( config )\n if config.subtensor:\n network = config.subtensor.get('network', bittensor.defaults.subtensor.network)\n\n if network == 'nakamoto':\n # Use nakamoto version of the CLI\n return naka_CLI(config=config)\n else:\n return cli_impl.CLI( config = config)\n\n @staticmethod \n def config(args: List[str]) -> 'bittensor.config':\n \"\"\" From the argument parser, add config to bittensor.executor and local config \n Return: bittensor.config object\n \"\"\"\n parser = argparse.ArgumentParser(\n description=f\"bittensor cli v{bittensor.__version__}\",\n usage=\"btcli <command> <command args>\",\n add_help=True)\n\n cmd_parsers = parser.add_subparsers(dest='command')\n RunCommand.add_args( cmd_parsers )\n HelpCommand.add_args( cmd_parsers ) \n ListCommand.add_args( cmd_parsers )\n QueryCommand.add_args( cmd_parsers )\n StakeCommand.add_args( cmd_parsers )\n UpdateCommand.add_args( cmd_parsers )\n InspectCommand.add_args( cmd_parsers ) \n WeightsCommand.add_args( cmd_parsers )\n UnStakeCommand.add_args( cmd_parsers )\n OverviewCommand.add_args( cmd_parsers )\n RegisterCommand.add_args( cmd_parsers )\n TransferCommand.add_args( cmd_parsers )\n NominateCommand.add_args( cmd_parsers )\n NewHotkeyCommand.add_args( cmd_parsers )\n MetagraphCommand.add_args( cmd_parsers )\n SetWeightsCommand.add_args( cmd_parsers )\n NewColdkeyCommand.add_args( cmd_parsers )\n NewHotkeyCommand.add_args( cmd_parsers )\n MyDelegatesCommand.add_args( cmd_parsers )\n ListSubnetsCommand.add_args( cmd_parsers )\n RegenHotkeyCommand.add_args( cmd_parsers )\n RegenColdkeyCommand.add_args( cmd_parsers )\n DelegateStakeCommand.add_args( cmd_parsers )\n DelegateUnstakeCommand.add_args( cmd_parsers )\n ListDelegatesCommand.add_args( cmd_parsers )\n RegenColdkeypubCommand.add_args( cmd_parsers )\n RecycleRegisterCommand.add_args( cmd_parsers )\n\n # If no arguments are passed, print help text.\n if len(args) == 0:\n parser.print_help()\n sys.exit()\n\n return bittensor.config( parser, args=args )\n\n @staticmethod \n def check_config (config: 'bittensor.Config'):\n \"\"\" Check if the essential config exist under different command\n \"\"\"\n if config.command == \"run\":\n RunCommand.check_config( config )\n elif config.command == \"transfer\":\n TransferCommand.check_config( config )\n elif config.command == \"register\":\n RegisterCommand.check_config( config )\n elif config.command == \"unstake\":\n UnStakeCommand.check_config( config )\n elif config.command == \"stake\":\n StakeCommand.check_config( config )\n elif config.command == \"overview\":\n OverviewCommand.check_config( config )\n elif config.command == \"new_coldkey\":\n NewColdkeyCommand.check_config( config )\n elif config.command == \"new_hotkey\":\n 
NewHotkeyCommand.check_config( config )\n elif config.command == \"regen_coldkey\":\n RegenColdkeyCommand.check_config( config )\n elif config.command == \"regen_coldkeypub\":\n RegenColdkeypubCommand.check_config( config )\n elif config.command == \"regen_hotkey\":\n RegenHotkeyCommand.check_config( config )\n elif config.command == \"metagraph\":\n MetagraphCommand.check_config( config )\n elif config.command == \"weights\":\n WeightsCommand.check_config( config )\n elif config.command == \"set_weights\":\n SetWeightsCommand.check_config( config )\n elif config.command == \"list\":\n ListCommand.check_config( config )\n elif config.command == \"inspect\":\n InspectCommand.check_config( config )\n elif config.command == \"query\":\n QueryCommand.check_config( config )\n elif config.command == \"help\":\n HelpCommand.check_config( config )\n elif config.command == \"update\":\n UpdateCommand.check_config( config )\n elif config.command == \"nominate\":\n NominateCommand.check_config( config )\n elif config.command == \"list_delegates\":\n ListDelegatesCommand.check_config( config )\n elif config.command == \"list_subnets\":\n ListSubnetsCommand.check_config( config )\n elif config.command == \"delegate\":\n DelegateStakeCommand.check_config( config )\n elif config.command == \"undelegate\":\n DelegateUnstakeCommand.check_config( config )\n elif config.command == \"my_delegates\":\n MyDelegatesCommand.check_config( config )\n elif config.command == \"recycle_register\":\n RecycleRegisterCommand.check_config( config )\n else:\n console.print(\":cross_mark:[red]Unknown command: {}[/red]\".format(config.command))\n sys.exit()\n\n ", "path": "bittensor/_cli/__init__.py"}], "after_files": [{"content": "\"\"\"\nCreate and init the CLI class, which handles the coldkey, hotkey and money transfer \n\"\"\"\n# The MIT License (MIT)\n# Copyright \u00a9 2021 Yuma Rao\n# Copyright \u00a9 2022 Opentensor Foundation\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \n# documentation files (the \u201cSoftware\u201d), to deal in the Software without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of \n# the Software.\n\n# THE SOFTWARE IS PROVIDED \u201cAS IS\u201d, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION \n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n# DEALINGS IN THE SOFTWARE.\n\nimport sys\nimport argparse\nimport bittensor\nfrom . 
import cli_impl\nfrom .commands import *\nfrom typing import List, Optional\nfrom .naka_cli_impl import CLI as naka_CLI\nconsole = bittensor.__console__\n\n# Turn off rich console locals trace.\nfrom rich.traceback import install\ninstall(show_locals=False)\n\nclass cli:\n \"\"\"\n Create and init the CLI class, which handles the coldkey, hotkey and tao transfer \n \"\"\"\n def __new__(\n cls,\n config: Optional['bittensor.Config'] = None,\n args: Optional[List[str]] = None, \n ) -> 'bittensor.CLI':\n r\"\"\" Creates a new bittensor.cli from passed arguments.\n Args:\n config (:obj:`bittensor.Config`, `optional`): \n bittensor.cli.config()\n args (`List[str]`, `optional`): \n The arguments to parse from the command line.\n \"\"\"\n if config == None: \n config = cli.config(args)\n cli.check_config( config )\n if config.subtensor:\n network = config.subtensor.get('network', bittensor.defaults.subtensor.network)\n\n if network == 'nakamoto':\n # Use nakamoto version of the CLI\n return naka_CLI(config=config)\n else:\n return cli_impl.CLI( config = config)\n \n @staticmethod\n def __create_parser__() -> 'argparse.ArgumentParser':\n \"\"\" Creates the argument parser for the bittensor cli.\n \"\"\"\n parser = argparse.ArgumentParser(\n description=f\"bittensor cli v{bittensor.__version__}\",\n usage=\"btcli <command> <command args>\",\n add_help=True)\n\n cmd_parsers = parser.add_subparsers(dest='command')\n RunCommand.add_args( cmd_parsers )\n HelpCommand.add_args( cmd_parsers ) \n ListCommand.add_args( cmd_parsers )\n QueryCommand.add_args( cmd_parsers )\n StakeCommand.add_args( cmd_parsers )\n UpdateCommand.add_args( cmd_parsers )\n InspectCommand.add_args( cmd_parsers ) \n WeightsCommand.add_args( cmd_parsers )\n UnStakeCommand.add_args( cmd_parsers )\n OverviewCommand.add_args( cmd_parsers )\n RegisterCommand.add_args( cmd_parsers )\n TransferCommand.add_args( cmd_parsers )\n NominateCommand.add_args( cmd_parsers )\n NewHotkeyCommand.add_args( cmd_parsers )\n MetagraphCommand.add_args( cmd_parsers )\n SetWeightsCommand.add_args( cmd_parsers )\n NewColdkeyCommand.add_args( cmd_parsers )\n MyDelegatesCommand.add_args( cmd_parsers )\n ListSubnetsCommand.add_args( cmd_parsers )\n RegenHotkeyCommand.add_args( cmd_parsers )\n RegenColdkeyCommand.add_args( cmd_parsers )\n DelegateStakeCommand.add_args( cmd_parsers )\n DelegateUnstakeCommand.add_args( cmd_parsers )\n ListDelegatesCommand.add_args( cmd_parsers )\n RegenColdkeypubCommand.add_args( cmd_parsers )\n RecycleRegisterCommand.add_args( cmd_parsers )\n\n return parser\n\n @staticmethod \n def config(args: List[str]) -> 'bittensor.config':\n \"\"\" From the argument parser, add config to bittensor.executor and local config \n Return: bittensor.config object\n \"\"\"\n parser = cli.__create_parser__()\n\n # If no arguments are passed, print help text.\n if len(args) == 0:\n parser.print_help()\n sys.exit()\n\n return bittensor.config( parser, args=args )\n\n @staticmethod \n def check_config (config: 'bittensor.Config'):\n \"\"\" Check if the essential config exist under different command\n \"\"\"\n if config.command == \"run\":\n RunCommand.check_config( config )\n elif config.command == \"transfer\":\n TransferCommand.check_config( config )\n elif config.command == \"register\":\n RegisterCommand.check_config( config )\n elif config.command == \"unstake\":\n UnStakeCommand.check_config( config )\n elif config.command == \"stake\":\n StakeCommand.check_config( config )\n elif config.command == \"overview\":\n OverviewCommand.check_config( 
config )\n elif config.command == \"new_coldkey\":\n NewColdkeyCommand.check_config( config )\n elif config.command == \"new_hotkey\":\n NewHotkeyCommand.check_config( config )\n elif config.command == \"regen_coldkey\":\n RegenColdkeyCommand.check_config( config )\n elif config.command == \"regen_coldkeypub\":\n RegenColdkeypubCommand.check_config( config )\n elif config.command == \"regen_hotkey\":\n RegenHotkeyCommand.check_config( config )\n elif config.command == \"metagraph\":\n MetagraphCommand.check_config( config )\n elif config.command == \"weights\":\n WeightsCommand.check_config( config )\n elif config.command == \"set_weights\":\n SetWeightsCommand.check_config( config )\n elif config.command == \"list\":\n ListCommand.check_config( config )\n elif config.command == \"inspect\":\n InspectCommand.check_config( config )\n elif config.command == \"query\":\n QueryCommand.check_config( config )\n elif config.command == \"help\":\n HelpCommand.check_config( config )\n elif config.command == \"update\":\n UpdateCommand.check_config( config )\n elif config.command == \"nominate\":\n NominateCommand.check_config( config )\n elif config.command == \"list_delegates\":\n ListDelegatesCommand.check_config( config )\n elif config.command == \"list_subnets\":\n ListSubnetsCommand.check_config( config )\n elif config.command == \"delegate\":\n DelegateStakeCommand.check_config( config )\n elif config.command == \"undelegate\":\n DelegateUnstakeCommand.check_config( config )\n elif config.command == \"my_delegates\":\n MyDelegatesCommand.check_config( config )\n elif config.command == \"recycle_register\":\n RecycleRegisterCommand.check_config( config )\n else:\n console.print(\":cross_mark:[red]Unknown command: {}[/red]\".format(config.command))\n sys.exit()\n\n ", "path": "bittensor/_cli/__init__.py"}]} | 2,249 | 425 |
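The root cause in the record above is simply that `NewHotkeyCommand.add_args( cmd_parsers )` is called twice, so argparse registers the `new_hotkey` subparser twice. A tiny reproduction, independent of bittensor, is sketched below; on the interpreter versions bittensor targeted at the time, the duplicate registration is accepted silently and merely rendered twice in the help text (newer Python versions may reject the conflicting name instead).

```python
import argparse

parser = argparse.ArgumentParser(prog="btcli")
cmd_parsers = parser.add_subparsers(dest="command")
cmd_parsers.add_parser("new_hotkey", help="Create a new hotkey")
cmd_parsers.add_parser("new_hotkey", help="Create a new hotkey")  # duplicate registration

parser.print_help()  # "new_hotkey" shows up twice in the command list
```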
gh_patches_debug_17528 | rasdani/github-patches | git_diff | allegro__ralph-3222 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Table 'ralph_ng.transitions_transition' doesn't exist
when I follow the document to setup a develop environment, I met the error" default: django.db.utils.ProgrammingError: (1146, "Table 'ralph_ng.transitions_transition' doesn't exist") ". I think it is because there are no such tables when newly install ralph3 develop environment but ralph3 try to migrate them(from ralph2). I am on mac and have download the default box manually which will be used in vagrant up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ralph/lib/transitions/checks.py`
Content:
```
1 from django.core.checks import Error
2 from django.db.utils import OperationalError
3 from django.template.base import TemplateDoesNotExist
4 from django.template.loader import get_template
5
6
7 def check_transition_templates(transition_templates):
8 # to prevent AppRegistryNotReady
9 from ralph.lib.transitions.models import Transition
10
11 errors = []
12 if transition_templates:
13 if not isinstance(transition_templates, (list, tuple)):
14 errors.append(Error(
15 'TRANSITION_TEMPLATES must be a list or a tuple',
16 id='transitions.E001'
17 ))
18 else:
19 for index, item in enumerate(transition_templates):
20 try:
21 path, template = item
22 except (ValueError, TypeError):
23 errors.append(Error(
24 'Element #{} must be a two elements tuple'.format(
25 index
26 ),
27 id='transitions.E003'
28 ))
29 continue
30 try:
31 get_template(path)
32 except TemplateDoesNotExist:
33 errors.append(Error(
34 'Template {} ({}) doesn\'t exist'.format(
35 template, path
36 ),
37 hint='Check TRANSITION_TEMPLATES settings',
38 id='transitions.E002'
39 ))
40 excluded_templates = ['']
41 if transition_templates:
42 try:
43 excluded_templates.extend(
44 {template for template, _ in transition_templates}
45 )
46 except ValueError:
47 pass
48 transitions_with_custom_templates = Transition.objects.exclude(
49 template_name__in=excluded_templates
50 )
51 try:
52 for transition in transitions_with_custom_templates:
53 errors.append(Error(
54 'Template {} for {} transition is '
55 'defined only in transition'.format(
56 transition.template_name, transition
57 ),
58 hint=(
59 'Change your TRANSITION_TEMPLATES settings by adding'
60 ' ({}, "Your template name") and then '
61 'edit {} transition').format(
62 transition.template_name, transition
63 ),
64 id='transitions.E004'
65 ))
66 except OperationalError:
67 pass
68 return errors
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ralph/lib/transitions/checks.py b/src/ralph/lib/transitions/checks.py
--- a/src/ralph/lib/transitions/checks.py
+++ b/src/ralph/lib/transitions/checks.py
@@ -1,9 +1,14 @@
+import logging
+
from django.core.checks import Error
-from django.db.utils import OperationalError
+from django.db.utils import DatabaseError
from django.template.base import TemplateDoesNotExist
from django.template.loader import get_template
+logger = logging.getLogger(__name__)
+
+
def check_transition_templates(transition_templates):
# to prevent AppRegistryNotReady
from ralph.lib.transitions.models import Transition
@@ -63,6 +68,6 @@
),
id='transitions.E004'
))
- except OperationalError:
- pass
+ except DatabaseError as e:
+ logger.error(e)
return errors
| {"golden_diff": "diff --git a/src/ralph/lib/transitions/checks.py b/src/ralph/lib/transitions/checks.py\n--- a/src/ralph/lib/transitions/checks.py\n+++ b/src/ralph/lib/transitions/checks.py\n@@ -1,9 +1,14 @@\n+import logging\n+\n from django.core.checks import Error\n-from django.db.utils import OperationalError\n+from django.db.utils import DatabaseError\n from django.template.base import TemplateDoesNotExist\n from django.template.loader import get_template\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n def check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from ralph.lib.transitions.models import Transition\n@@ -63,6 +68,6 @@\n ),\n id='transitions.E004'\n ))\n- except OperationalError:\n- pass\n+ except DatabaseError as e:\n+ logger.error(e)\n return errors\n", "issue": "Table 'ralph_ng.transitions_transition' doesn't exist\nwhen I follow the document to setup a develop environment, I met the error\" default: django.db.utils.ProgrammingError: (1146, \"Table 'ralph_ng.transitions_transition' doesn't exist\") \". I think it is because there are no such tables when newly install ralph3 develop environment but ralph3 try to migrate them(from ralph2). I am on mac and have download the default box manually which will be used in vagrant up.\n", "before_files": [{"content": "from django.core.checks import Error\nfrom django.db.utils import OperationalError\nfrom django.template.base import TemplateDoesNotExist\nfrom django.template.loader import get_template\n\n\ndef check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from ralph.lib.transitions.models import Transition\n\n errors = []\n if transition_templates:\n if not isinstance(transition_templates, (list, tuple)):\n errors.append(Error(\n 'TRANSITION_TEMPLATES must be a list or a tuple',\n id='transitions.E001'\n ))\n else:\n for index, item in enumerate(transition_templates):\n try:\n path, template = item\n except (ValueError, TypeError):\n errors.append(Error(\n 'Element #{} must be a two elements tuple'.format(\n index\n ),\n id='transitions.E003'\n ))\n continue\n try:\n get_template(path)\n except TemplateDoesNotExist:\n errors.append(Error(\n 'Template {} ({}) doesn\\'t exist'.format(\n template, path\n ),\n hint='Check TRANSITION_TEMPLATES settings',\n id='transitions.E002'\n ))\n excluded_templates = ['']\n if transition_templates:\n try:\n excluded_templates.extend(\n {template for template, _ in transition_templates}\n )\n except ValueError:\n pass\n transitions_with_custom_templates = Transition.objects.exclude(\n template_name__in=excluded_templates\n )\n try:\n for transition in transitions_with_custom_templates:\n errors.append(Error(\n 'Template {} for {} transition is '\n 'defined only in transition'.format(\n transition.template_name, transition\n ),\n hint=(\n 'Change your TRANSITION_TEMPLATES settings by adding'\n ' ({}, \"Your template name\") and then '\n 'edit {} transition').format(\n transition.template_name, transition\n ),\n id='transitions.E004'\n ))\n except OperationalError:\n pass\n return errors\n", "path": "src/ralph/lib/transitions/checks.py"}], "after_files": [{"content": "import logging\n\nfrom django.core.checks import Error\nfrom django.db.utils import DatabaseError\nfrom django.template.base import TemplateDoesNotExist\nfrom django.template.loader import get_template\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_transition_templates(transition_templates):\n # to prevent AppRegistryNotReady\n from 
ralph.lib.transitions.models import Transition\n\n errors = []\n if transition_templates:\n if not isinstance(transition_templates, (list, tuple)):\n errors.append(Error(\n 'TRANSITION_TEMPLATES must be a list or a tuple',\n id='transitions.E001'\n ))\n else:\n for index, item in enumerate(transition_templates):\n try:\n path, template = item\n except (ValueError, TypeError):\n errors.append(Error(\n 'Element #{} must be a two elements tuple'.format(\n index\n ),\n id='transitions.E003'\n ))\n continue\n try:\n get_template(path)\n except TemplateDoesNotExist:\n errors.append(Error(\n 'Template {} ({}) doesn\\'t exist'.format(\n template, path\n ),\n hint='Check TRANSITION_TEMPLATES settings',\n id='transitions.E002'\n ))\n excluded_templates = ['']\n if transition_templates:\n try:\n excluded_templates.extend(\n {template for template, _ in transition_templates}\n )\n except ValueError:\n pass\n transitions_with_custom_templates = Transition.objects.exclude(\n template_name__in=excluded_templates\n )\n try:\n for transition in transitions_with_custom_templates:\n errors.append(Error(\n 'Template {} for {} transition is '\n 'defined only in transition'.format(\n transition.template_name, transition\n ),\n hint=(\n 'Change your TRANSITION_TEMPLATES settings by adding'\n ' ({}, \"Your template name\") and then '\n 'edit {} transition').format(\n transition.template_name, transition\n ),\n id='transitions.E004'\n ))\n except DatabaseError as e:\n logger.error(e)\n return errors\n", "path": "src/ralph/lib/transitions/checks.py"}]} | 926 | 202 |
gh_patches_debug_33816 | rasdani/github-patches | git_diff | marshmallow-code__webargs-464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RFC: Only accept delimited string in DelimitedList
`DelimitedList` accepts either a list or a delimited string (e.g. "foo,bar,baz").
I'd like to make it more strict by only accepting a delimited list. Rather than adding a `strict` parameter, I'm thinking of dropping the whole "also accept a list" feature.
Any reason to support both?
I understand it inherits from `List` because once the string is parsed, it can be deserialized as a normal list. But are there cases where you'd expect either a list or a delimited string?
--- END ISSUE ---
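To make the trade-off concrete, here is a minimal, library-independent sketch of the lenient behaviour described above versus the stricter proposal. The helper names are hypothetical and are not part of webargs or marshmallow; they only illustrate the two parsing rules.
```python
# Hypothetical helpers contrasting the current lenient parsing with the
# proposed strict parsing; not webargs/marshmallow code.

def parse_delimited_lenient(value, delimiter=","):
    # Current behaviour: pass a real list/tuple through, otherwise split a string.
    if isinstance(value, (list, tuple)):
        return list(value)
    return value.split(delimiter)


def parse_delimited_strict(value, delimiter=","):
    # Proposed behaviour: only a delimited string is accepted.
    if not isinstance(value, (str, bytes)):
        raise ValueError("Not a valid delimited list.")
    if isinstance(value, bytes):
        value = value.decode()
    return value.split(delimiter)


if __name__ == "__main__":
    print(parse_delimited_lenient(["foo", "bar"]))  # ['foo', 'bar']
    print(parse_delimited_lenient("foo,bar,baz"))   # ['foo', 'bar', 'baz']
    print(parse_delimited_strict("foo,bar,baz"))    # ['foo', 'bar', 'baz']
    # parse_delimited_strict(["foo", "bar"]) raises ValueError under the proposal
```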
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/fields.py`
Content:
```
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15
16 Note: `data_key` replaced `load_from` in marshmallow 3.
17 When using marshmallow 2, use `load_from`.
18 """
19 import marshmallow as ma
20
21 # Expose all fields from marshmallow.fields.
22 from marshmallow.fields import * # noqa: F40
23 from webargs.compat import MARSHMALLOW_VERSION_INFO
24 from webargs.dict2schema import dict2schema
25
26 __all__ = ["DelimitedList"] + ma.fields.__all__
27
28
29 class Nested(ma.fields.Nested):
30 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
31 the first argument, which will be converted to a `marshmallow.Schema`.
32
33 .. note::
34
35 The schema class here will always be `marshmallow.Schema`, regardless
36 of whether a custom schema class is set on the parser. Pass an explicit schema
37 class if necessary.
38 """
39
40 def __init__(self, nested, *args, **kwargs):
41 if isinstance(nested, dict):
42 nested = dict2schema(nested)
43 super().__init__(nested, *args, **kwargs)
44
45
46 class DelimitedList(ma.fields.List):
47 """Same as `marshmallow.fields.List`, except can load from either a list or
48 a delimited string (e.g. "foo,bar,baz").
49
50 :param Field cls_or_instance: A field class or instance.
51 :param str delimiter: Delimiter between values.
52 :param bool as_string: Dump values to string.
53 """
54
55 delimiter = ","
56
57 def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
58 self.delimiter = delimiter or self.delimiter
59 self.as_string = as_string
60 super().__init__(cls_or_instance, **kwargs)
61
62 def _serialize(self, value, attr, obj):
63 ret = super()._serialize(value, attr, obj)
64 if self.as_string:
65 return self.delimiter.join(format(each) for each in ret)
66 return ret
67
68 def _deserialize(self, value, attr, data, **kwargs):
69 try:
70 ret = (
71 value
72 if ma.utils.is_iterable_but_not_string(value)
73 else value.split(self.delimiter)
74 )
75 except AttributeError:
76 if MARSHMALLOW_VERSION_INFO[0] < 3:
77 self.fail("invalid")
78 else:
79 raise self.make_error("invalid")
80 return super()._deserialize(ret, attr, data, **kwargs)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -44,37 +44,35 @@
class DelimitedList(ma.fields.List):
- """Same as `marshmallow.fields.List`, except can load from either a list or
- a delimited string (e.g. "foo,bar,baz").
+ """A field which is similar to a List, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like List, it can be given a nested field type which it will use to
+ de/serialize each element of the list.
:param Field cls_or_instance: A field class or instance.
:param str delimiter: Delimiter between values.
- :param bool as_string: Dump values to string.
"""
+ default_error_messages = {"invalid": "Not a valid delimited list."}
delimiter = ","
- def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
+ def __init__(self, cls_or_instance, delimiter=None, **kwargs):
self.delimiter = delimiter or self.delimiter
- self.as_string = as_string
super().__init__(cls_or_instance, **kwargs)
def _serialize(self, value, attr, obj):
- ret = super()._serialize(value, attr, obj)
- if self.as_string:
- return self.delimiter.join(format(each) for each in ret)
- return ret
+ # serializing will start with List serialization, so that we correctly
+ # output lists of non-primitive types, e.g. DelimitedList(DateTime)
+ return self.delimiter.join(
+ format(each) for each in super()._serialize(value, attr, obj)
+ )
def _deserialize(self, value, attr, data, **kwargs):
- try:
- ret = (
- value
- if ma.utils.is_iterable_but_not_string(value)
- else value.split(self.delimiter)
- )
- except AttributeError:
+ # attempting to deserialize from a non-string source is an error
+ if not isinstance(value, (str, bytes)):
if MARSHMALLOW_VERSION_INFO[0] < 3:
self.fail("invalid")
else:
raise self.make_error("invalid")
- return super()._deserialize(ret, attr, data, **kwargs)
+ return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
| {"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -44,37 +44,35 @@\n \n \n class DelimitedList(ma.fields.List):\n- \"\"\"Same as `marshmallow.fields.List`, except can load from either a list or\n- a delimited string (e.g. \"foo,bar,baz\").\n+ \"\"\"A field which is similar to a List, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like List, it can be given a nested field type which it will use to\n+ de/serialize each element of the list.\n \n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n- :param bool as_string: Dump values to string.\n \"\"\"\n \n+ default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n \n- def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):\n+ def __init__(self, cls_or_instance, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n- self.as_string = as_string\n super().__init__(cls_or_instance, **kwargs)\n \n def _serialize(self, value, attr, obj):\n- ret = super()._serialize(value, attr, obj)\n- if self.as_string:\n- return self.delimiter.join(format(each) for each in ret)\n- return ret\n+ # serializing will start with List serialization, so that we correctly\n+ # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n+ return self.delimiter.join(\n+ format(each) for each in super()._serialize(value, attr, obj)\n+ )\n \n def _deserialize(self, value, attr, data, **kwargs):\n- try:\n- ret = (\n- value\n- if ma.utils.is_iterable_but_not_string(value)\n- else value.split(self.delimiter)\n- )\n- except AttributeError:\n+ # attempting to deserialize from a non-string source is an error\n+ if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n- return super()._deserialize(ret, attr, data, **kwargs)\n+ return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "issue": "RFC: Only accept delimited string in DelimitedList\n`DelimitedList` accepts either a list or a delimited string (e.g. \"foo,bar,baz\").\r\n\r\nI'd like to make it more strict by only accepting a delimited list. Rather than adding a `strict` parameter, I'm thinking of dropping the whole \"also accept a list\" feature.\r\n\r\nAny reason to support both?\r\n\r\nI understand it inherits from `List` because once the string is parsed, it can be deserialized as a normal list. But are there cases where you'd expect either a list or a delimited string?\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. 
code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"Same as `marshmallow.fields.List`, except can load from either a list or\n a delimited string (e.g. \"foo,bar,baz\").\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n :param bool as_string: Dump values to string.\n \"\"\"\n\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):\n self.delimiter = delimiter or self.delimiter\n self.as_string = as_string\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n ret = super()._serialize(value, attr, obj)\n if self.as_string:\n return self.delimiter.join(format(each) for each in ret)\n return ret\n\n def _deserialize(self, value, attr, data, **kwargs):\n try:\n ret = (\n value\n if ma.utils.is_iterable_but_not_string(value)\n else value.split(self.delimiter)\n )\n except AttributeError:\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(ret, attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}], "after_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. 
Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n # serializing will start with List serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}]} | 1,162 | 565 |
gh_patches_debug_16085 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-508 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create canonical ordering for tables and return it by default
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
We should have a canonical ordering for each table whenever possible. This will make infinite scroll easier to deal with.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
We should order by primary key by default if the table has one. Otherwise, we should use the entire row as a sorting key (it won't be possible to tell the difference if two identical rows "switch order"). We should always return rows in these orders when they are accessed unless the user specifies a different `ORDER BY`.
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
- Please see #361
--- END ISSUE ---
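As a rough sketch of the proposed default ordering (order by the primary key when one exists, otherwise by every column), using plain SQLAlchemy table objects and the `{'field': ..., 'direction': ...}` sort format that sqlalchemy-filters expects. The helper name is an assumption for illustration only, and the real implementation may pass column objects rather than column names.
```python
# Sketch only: build a default sort spec for a table.
from sqlalchemy import Column, Integer, MetaData, String, Table


def default_order_by(table):
    # Prefer the primary key; otherwise fall back to all columns so the
    # ordering is total and rows come back in a stable order.
    pk_cols = table.primary_key.columns
    cols = pk_cols if len(pk_cols) > 0 else table.columns
    return [{"field": col.name, "direction": "asc"} for col in cols]


metadata = MetaData()
with_pk = Table(
    "with_pk", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String),
)
no_pk = Table(
    "no_pk", metadata,
    Column("a", Integer),
    Column("b", String),
)

print(default_order_by(with_pk))  # [{'field': 'id', 'direction': 'asc'}]
print(default_order_by(no_pk))    # one entry per column: 'a' then 'b'
```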
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/records.py`
Content:
```
1 import logging
2 from sqlalchemy import delete, select, Column, func
3 from sqlalchemy.inspection import inspect
4 from sqlalchemy_filters import apply_filters, apply_sort
5 from sqlalchemy_filters.exceptions import FieldNotFound
6
7
8 logger = logging.getLogger(__name__)
9
10
11 # Grouping exceptions follow the sqlalchemy_filters exceptions patterns
12 class BadGroupFormat(Exception):
13 pass
14
15
16 class GroupFieldNotFound(FieldNotFound):
17 pass
18
19
20 def _get_primary_key_column(table):
21 primary_key_list = list(inspect(table).primary_key)
22 # We do not support getting by composite primary keys
23 assert len(primary_key_list) == 1
24 return primary_key_list[0]
25
26
27 def _create_col_objects(table, column_list):
28 return [
29 table.columns[col] if type(col) == str else table.columns[col.name]
30 for col in column_list
31 ]
32
33
34 def _get_query(table, limit, offset, order_by, filters):
35 query = select(table).limit(limit).offset(offset)
36 if order_by is not None:
37 query = apply_sort(query, order_by)
38 if filters is not None:
39 query = apply_filters(query, filters)
40 return query
41
42
43 def _execute_query(query, engine):
44 with engine.begin() as conn:
45 records = conn.execute(query).fetchall()
46 return records
47
48
49 def get_record(table, engine, id_value):
50 primary_key_column = _get_primary_key_column(table)
51 query = select(table).where(primary_key_column == id_value)
52 result = _execute_query(query, engine)
53 assert len(result) <= 1
54 return result[0] if result else None
55
56
57 def get_records(
58 table, engine, limit=None, offset=None, order_by=[], filters=[],
59 ):
60 """
61 Returns records from a table.
62
63 Args:
64 table: SQLAlchemy table object
65 engine: SQLAlchemy engine object
66 limit: int, gives number of rows to return
67 offset: int, gives number of rows to skip
68 order_by: list of dictionaries, where each dictionary has a 'field' and
69 'direction' field.
70 See: https://github.com/centerofci/sqlalchemy-filters#sort-format
71 filters: list of dictionaries, where each dictionary has a 'field' and 'op'
72 field, in addition to an 'value' field if appropriate.
73 See: https://github.com/centerofci/sqlalchemy-filters#filters-format
74 """
75 query = _get_query(table, limit, offset, order_by, filters)
76 return _execute_query(query, engine)
77
78
79 def get_group_counts(
80 table, engine, group_by, limit=None, offset=None, order_by=[], filters=[],
81 ):
82 """
83 Returns counts by specified groupings
84
85 Args:
86 table: SQLAlchemy table object
87 engine: SQLAlchemy engine object
88 limit: int, gives number of rows to return
89 offset: int, gives number of rows to skip
90 group_by: list or tuple of column names or column objects to group by
91 order_by: list of dictionaries, where each dictionary has a 'field' and
92 'direction' field.
93 See: https://github.com/centerofci/sqlalchemy-filters#sort-format
94 filters: list of dictionaries, where each dictionary has a 'field' and 'op'
95 field, in addition to an 'value' field if appropriate.
96 See: https://github.com/centerofci/sqlalchemy-filters#filters-format
97 """
98 if type(group_by) not in (tuple, list):
99 raise BadGroupFormat(f"Group spec {group_by} must be list or tuple.")
100 for field in group_by:
101 if type(field) not in (str, Column):
102 raise BadGroupFormat(f"Group field {field} must be a string or Column.")
103 field_name = field if type(field) == str else field.name
104 if field_name not in table.c:
105 raise GroupFieldNotFound(f"Group field {field} not found in {table}.")
106
107 table_columns = _create_col_objects(table, group_by)
108 count_query = (
109 select(*table_columns, func.count(table_columns[0]))
110 .group_by(*table_columns)
111 )
112 if filters is not None:
113 count_query = apply_filters(count_query, filters)
114 filtered_count_query = _get_filtered_group_by_count_query(
115 table, engine, group_by, limit, offset, order_by, filters, count_query
116 )
117 if filtered_count_query is not None:
118 records = _execute_query(filtered_count_query, engine)
119 # Last field is the count, preceding fields are the group by fields
120 counts = {(*record[:-1],): record[-1] for record in records}
121 else:
122 counts = {}
123 return counts
124
125
126 def _get_filtered_group_by_count_query(
127 table, engine, group_by, limit, offset, order_by, filters, count_query
128 ):
129 # Get the list of groups that we should count.
130 # We're considering limit and offset here so that we only count relevant groups
131 relevant_subtable_query = _get_query(table, limit, offset, order_by, filters)
132 relevant_subtable_cte = relevant_subtable_query.cte()
133 cte_columns = _create_col_objects(relevant_subtable_cte, group_by)
134 distinct_tuples = get_distinct_tuple_values(cte_columns, engine, output_table=table)
135 if distinct_tuples:
136 limited_filters = [
137 {
138 "or": [
139 distinct_tuples_to_filter(distinct_tuple_spec)
140 for distinct_tuple_spec in distinct_tuples
141 ]
142 }
143 ]
144 filtered_count_query = apply_filters(count_query, limited_filters)
145 else:
146 filtered_count_query = None
147 return filtered_count_query
148
149
150 def get_distinct_tuple_values(
151 column_list, engine, table=None, limit=None, offset=None, output_table=None
152 ):
153 """
154 Returns distinct tuples from a given list of columns.
155
156 Args:
157 column_list: list of column names or SQLAlchemy column objects
158 engine: SQLAlchemy engine object
159 table: SQLAlchemy table object
160 limit: int, gives number of rows to return
161 offset: int, gives number of rows to skip
162
163 If no table is given, the column_list must consist entirely of
164 SQLAlchemy column objects associated with a table.
165 """
166 if table is not None:
167 column_objects = _create_col_objects(table, column_list)
168 else:
169 column_objects = column_list
170 try:
171 assert all([type(col) == Column for col in column_objects])
172 except AssertionError as e:
173 logger.error("All columns must be str or sqlalchemy.Column type")
174 raise e
175
176 query = (
177 select(*column_objects)
178 .distinct()
179 .limit(limit)
180 .offset(offset)
181 )
182 result = _execute_query(query, engine)
183 if output_table is not None:
184 column_objects = [output_table.columns[col.name] for col in column_objects]
185 return [tuple(zip(column_objects, row)) for row in result]
186
187
188 def distinct_tuples_to_filter(distinct_tuples):
189 filters = []
190 for col, value in distinct_tuples:
191 filters.append({
192 "field": col,
193 "op": "==",
194 "value": value,
195 })
196 return filters
197
198
199 def create_record_or_records(table, engine, record_data):
200 """
201 record_data can be a dictionary, tuple, or list of dictionaries or tuples.
202 if record_data is a list, it creates multiple records.
203 """
204 id_value = None
205 with engine.begin() as connection:
206 result = connection.execute(table.insert(), record_data)
207 # If there was only a single record created, return the record.
208 if result.rowcount == 1:
209 # We need to manually commit insertion so that we can retrieve the record.
210 connection.commit()
211 id_value = result.inserted_primary_key[0]
212 if id_value is not None:
213 return get_record(table, engine, id_value)
214 # Do not return any records if multiple rows were added.
215 return None
216
217
218 def create_records_from_csv(
219 table,
220 engine,
221 csv_filename,
222 column_names,
223 header,
224 delimiter=None,
225 escape=None,
226 quote=None,
227 ):
228 with open(csv_filename, 'rb') as csv_file:
229 with engine.begin() as conn:
230 cursor = conn.connection.cursor()
231 relation = '.'.join('"{}"'.format(part) for part in (table.schema, table.name))
232 formatted_columns = '({})'.format(','.join([f'"{column_name}"' for column_name in column_names]))
233
234 copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV'
235 if header:
236 copy_sql += " HEADER"
237 if delimiter:
238 copy_sql += f" DELIMITER E'{delimiter}'"
239 if escape:
240 copy_sql += f" ESCAPE '{escape}'"
241 if quote:
242 if quote == "'":
243 quote = "''"
244 copy_sql += f" QUOTE '{quote}'"
245
246 cursor.copy_expert(copy_sql, csv_file)
247
248
249 def update_record(table, engine, id_value, record_data):
250 primary_key_column = _get_primary_key_column(table)
251 with engine.begin() as connection:
252 connection.execute(
253 table.update().where(primary_key_column == id_value).values(record_data)
254 )
255 return get_record(table, engine, id_value)
256
257
258 def delete_record(table, engine, id_value):
259 primary_key_column = _get_primary_key_column(table)
260 query = delete(table).where(primary_key_column == id_value)
261 with engine.begin() as conn:
262 return conn.execute(query)
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/records.py b/db/records.py
--- a/db/records.py
+++ b/db/records.py
@@ -72,6 +72,17 @@
field, in addition to an 'value' field if appropriate.
See: https://github.com/centerofci/sqlalchemy-filters#filters-format
"""
+ if not order_by:
+ # Set default ordering if none was requested
+ if len(table.primary_key.columns) > 0:
+ # If there are primary keys, order by all primary keys
+ order_by = [{'field': col, 'direction': 'asc'}
+ for col in table.primary_key.columns]
+ else:
+ # If there aren't primary keys, order by all columns
+ order_by = [{'field': col, 'direction': 'asc'}
+ for col in table.columns]
+
query = _get_query(table, limit, offset, order_by, filters)
return _execute_query(query, engine)
| {"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -72,6 +72,17 @@\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n+ if not order_by:\n+ # Set default ordering if none was requested\n+ if len(table.primary_key.columns) > 0:\n+ # If there are primary keys, order by all primary keys\n+ order_by = [{'field': col, 'direction': 'asc'}\n+ for col in table.primary_key.columns]\n+ else:\n+ # If there aren't primary keys, order by all columns\n+ order_by = [{'field': col, 'direction': 'asc'}\n+ for col in table.columns]\n+\n query = _get_query(table, limit, offset, order_by, filters)\n return _execute_query(query, engine)\n", "issue": "Create canonical ordering for tables and return it by default\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nWe should have a canonical ordering for each table whenever possible. This will make infinite scroll easier to deal with.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nWe should order by primary key by default if the table has one. Otherwise, we should use the entire row as a sorting key (it won't be possible to tell the difference if two identical rows \"switch order\"). We should always return rows in these orders when they are accessed unless the user specifies a different `ORDER BY`.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Please see #361\n", "before_files": [{"content": "import logging\nfrom sqlalchemy import delete, select, Column, func\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_filters import apply_filters, apply_sort\nfrom sqlalchemy_filters.exceptions import FieldNotFound\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Grouping exceptions follow the sqlalchemy_filters exceptions patterns\nclass BadGroupFormat(Exception):\n pass\n\n\nclass GroupFieldNotFound(FieldNotFound):\n pass\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef _create_col_objects(table, column_list):\n return [\n table.columns[col] if type(col) == str else table.columns[col.name]\n for col in column_list\n ]\n\n\ndef _get_query(table, limit, offset, order_by, filters):\n query = select(table).limit(limit).offset(offset)\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n return query\n\n\ndef _execute_query(query, engine):\n with engine.begin() as conn:\n records = conn.execute(query).fetchall()\n return records\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n result = _execute_query(query, engine)\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(\n table, engine, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 
'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n query = _get_query(table, limit, offset, order_by, filters)\n return _execute_query(query, engine)\n\n\ndef get_group_counts(\n table, engine, group_by, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns counts by specified groupings\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n group_by: list or tuple of column names or column objects to group by\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n if type(group_by) not in (tuple, list):\n raise BadGroupFormat(f\"Group spec {group_by} must be list or tuple.\")\n for field in group_by:\n if type(field) not in (str, Column):\n raise BadGroupFormat(f\"Group field {field} must be a string or Column.\")\n field_name = field if type(field) == str else field.name\n if field_name not in table.c:\n raise GroupFieldNotFound(f\"Group field {field} not found in {table}.\")\n\n table_columns = _create_col_objects(table, group_by)\n count_query = (\n select(*table_columns, func.count(table_columns[0]))\n .group_by(*table_columns)\n )\n if filters is not None:\n count_query = apply_filters(count_query, filters)\n filtered_count_query = _get_filtered_group_by_count_query(\n table, engine, group_by, limit, offset, order_by, filters, count_query\n )\n if filtered_count_query is not None:\n records = _execute_query(filtered_count_query, engine)\n # Last field is the count, preceding fields are the group by fields\n counts = {(*record[:-1],): record[-1] for record in records}\n else:\n counts = {}\n return counts\n\n\ndef _get_filtered_group_by_count_query(\n table, engine, group_by, limit, offset, order_by, filters, count_query\n):\n # Get the list of groups that we should count.\n # We're considering limit and offset here so that we only count relevant groups\n relevant_subtable_query = _get_query(table, limit, offset, order_by, filters)\n relevant_subtable_cte = relevant_subtable_query.cte()\n cte_columns = _create_col_objects(relevant_subtable_cte, group_by)\n distinct_tuples = get_distinct_tuple_values(cte_columns, engine, output_table=table)\n if distinct_tuples:\n limited_filters = [\n {\n \"or\": [\n distinct_tuples_to_filter(distinct_tuple_spec)\n for distinct_tuple_spec in distinct_tuples\n ]\n }\n ]\n filtered_count_query = apply_filters(count_query, limited_filters)\n else:\n filtered_count_query = None\n return filtered_count_query\n\n\ndef get_distinct_tuple_values(\n column_list, engine, table=None, limit=None, offset=None, output_table=None\n):\n \"\"\"\n Returns distinct tuples from a given list of columns.\n\n Args:\n column_list: list of column names or SQLAlchemy column objects\n engine: SQLAlchemy engine object\n table: SQLAlchemy table object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n\n If no table is given, the column_list must 
consist entirely of\n SQLAlchemy column objects associated with a table.\n \"\"\"\n if table is not None:\n column_objects = _create_col_objects(table, column_list)\n else:\n column_objects = column_list\n try:\n assert all([type(col) == Column for col in column_objects])\n except AssertionError as e:\n logger.error(\"All columns must be str or sqlalchemy.Column type\")\n raise e\n\n query = (\n select(*column_objects)\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n result = _execute_query(query, engine)\n if output_table is not None:\n column_objects = [output_table.columns[col.name] for col in column_objects]\n return [tuple(zip(column_objects, row)) for row in result]\n\n\ndef distinct_tuples_to_filter(distinct_tuples):\n filters = []\n for col, value in distinct_tuples:\n filters.append({\n \"field\": col,\n \"op\": \"==\",\n \"value\": value,\n })\n return filters\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(\n table,\n engine,\n csv_filename,\n column_names,\n header,\n delimiter=None,\n escape=None,\n quote=None,\n):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV'\n if header:\n copy_sql += \" HEADER\"\n if delimiter:\n copy_sql += f\" DELIMITER E'{delimiter}'\"\n if escape:\n copy_sql += f\" ESCAPE '{escape}'\"\n if quote:\n if quote == \"'\":\n quote = \"''\"\n copy_sql += f\" QUOTE '{quote}'\"\n\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}], "after_files": [{"content": "import logging\nfrom sqlalchemy import delete, select, Column, func\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_filters import apply_filters, apply_sort\nfrom sqlalchemy_filters.exceptions import FieldNotFound\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Grouping exceptions follow the sqlalchemy_filters exceptions patterns\nclass BadGroupFormat(Exception):\n pass\n\n\nclass GroupFieldNotFound(FieldNotFound):\n pass\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite 
primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef _create_col_objects(table, column_list):\n return [\n table.columns[col] if type(col) == str else table.columns[col.name]\n for col in column_list\n ]\n\n\ndef _get_query(table, limit, offset, order_by, filters):\n query = select(table).limit(limit).offset(offset)\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n return query\n\n\ndef _execute_query(query, engine):\n with engine.begin() as conn:\n records = conn.execute(query).fetchall()\n return records\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n result = _execute_query(query, engine)\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(\n table, engine, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n if not order_by:\n # Set default ordering if none was requested\n if len(table.primary_key.columns) > 0:\n # If there are primary keys, order by all primary keys\n order_by = [{'field': col, 'direction': 'asc'}\n for col in table.primary_key.columns]\n else:\n # If there aren't primary keys, order by all columns\n order_by = [{'field': col, 'direction': 'asc'}\n for col in table.columns]\n\n query = _get_query(table, limit, offset, order_by, filters)\n return _execute_query(query, engine)\n\n\ndef get_group_counts(\n table, engine, group_by, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns counts by specified groupings\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n group_by: list or tuple of column names or column objects to group by\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n if type(group_by) not in (tuple, list):\n raise BadGroupFormat(f\"Group spec {group_by} must be list or tuple.\")\n for field in group_by:\n if type(field) not in (str, Column):\n raise BadGroupFormat(f\"Group field {field} must be a string or Column.\")\n field_name = field if type(field) == str else field.name\n if field_name not in table.c:\n raise GroupFieldNotFound(f\"Group field {field} not found in {table}.\")\n\n table_columns = _create_col_objects(table, group_by)\n count_query = (\n select(*table_columns, func.count(table_columns[0]))\n .group_by(*table_columns)\n )\n if filters is not None:\n count_query = apply_filters(count_query, filters)\n filtered_count_query = 
_get_filtered_group_by_count_query(\n table, engine, group_by, limit, offset, order_by, filters, count_query\n )\n if filtered_count_query is not None:\n records = _execute_query(filtered_count_query, engine)\n # Last field is the count, preceding fields are the group by fields\n counts = {(*record[:-1],): record[-1] for record in records}\n else:\n counts = {}\n return counts\n\n\ndef _get_filtered_group_by_count_query(\n table, engine, group_by, limit, offset, order_by, filters, count_query\n):\n # Get the list of groups that we should count.\n # We're considering limit and offset here so that we only count relevant groups\n relevant_subtable_query = _get_query(table, limit, offset, order_by, filters)\n relevant_subtable_cte = relevant_subtable_query.cte()\n cte_columns = _create_col_objects(relevant_subtable_cte, group_by)\n distinct_tuples = get_distinct_tuple_values(cte_columns, engine, output_table=table)\n if distinct_tuples:\n limited_filters = [\n {\n \"or\": [\n distinct_tuples_to_filter(distinct_tuple_spec)\n for distinct_tuple_spec in distinct_tuples\n ]\n }\n ]\n filtered_count_query = apply_filters(count_query, limited_filters)\n else:\n filtered_count_query = None\n return filtered_count_query\n\n\ndef get_distinct_tuple_values(\n column_list, engine, table=None, limit=None, offset=None, output_table=None\n):\n \"\"\"\n Returns distinct tuples from a given list of columns.\n\n Args:\n column_list: list of column names or SQLAlchemy column objects\n engine: SQLAlchemy engine object\n table: SQLAlchemy table object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n\n If no table is given, the column_list must consist entirely of\n SQLAlchemy column objects associated with a table.\n \"\"\"\n if table is not None:\n column_objects = _create_col_objects(table, column_list)\n else:\n column_objects = column_list\n try:\n assert all([type(col) == Column for col in column_objects])\n except AssertionError as e:\n logger.error(\"All columns must be str or sqlalchemy.Column type\")\n raise e\n\n query = (\n select(*column_objects)\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n result = _execute_query(query, engine)\n if output_table is not None:\n column_objects = [output_table.columns[col.name] for col in column_objects]\n return [tuple(zip(column_objects, row)) for row in result]\n\n\ndef distinct_tuples_to_filter(distinct_tuples):\n filters = []\n for col, value in distinct_tuples:\n filters.append({\n \"field\": col,\n \"op\": \"==\",\n \"value\": value,\n })\n return filters\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(\n table,\n engine,\n csv_filename,\n column_names,\n header,\n delimiter=None,\n escape=None,\n quote=None,\n):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n 
relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV'\n if header:\n copy_sql += \" HEADER\"\n if delimiter:\n copy_sql += f\" DELIMITER E'{delimiter}'\"\n if escape:\n copy_sql += f\" ESCAPE '{escape}'\"\n if quote:\n if quote == \"'\":\n quote = \"''\"\n copy_sql += f\" QUOTE '{quote}'\"\n\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]} | 3,167 | 218 |
gh_patches_debug_49452 | rasdani/github-patches | git_diff | wagtail__wagtail-840 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paginator and search pagination expect different parameters for page
The Paginator (as in `django.core.paginator`) used pretty much everywhere uses `page` as the query parameter. The search view, however, [expects](https://github.com/torchbox/wagtail/blob/100797796df0bc8ca96035092f32a9275d2b3713/wagtail/wagtailsearch/views/queries.py#L28) a `p` query parameter for pagination.
While not a bug, it is a bit confusing and makes it less elegant to share a pagination include. Certainly made me scratch my head.
Worth a PR?
Cheers,
Dan
--- END ISSUE ---
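As a tiny illustration of the mismatch (and of one way a view could accept both names), with a plain dict standing in for `request.GET`; the helper is hypothetical and not part of Wagtail or Django.
```python
# Hypothetical helper: prefer the Paginator-style 'page' parameter while still
# honouring the legacy 'p' parameter used by the search view.
def page_number(query_params, default=1):
    return query_params.get("page", query_params.get("p", default))


print(page_number({"q": "hello", "p": "3"}))     # '3'  (legacy search parameter)
print(page_number({"q": "hello", "page": "2"}))  # '2'  (Paginator-style parameter)
print(page_number({"q": "hello"}))               # 1    (default)
```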
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailsearch/views/frontend.py`
Content:
```
1 import json
2
3 from django.conf import settings
4 from django.shortcuts import render
5 from django.http import HttpResponse
6 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
7
8 from wagtail.wagtailcore import models
9 from wagtail.wagtailsearch.models import Query
10
11
12 def search(
13 request,
14 template=None,
15 template_ajax=None,
16 results_per_page=10,
17 use_json=False,
18 json_attrs=['title', 'url'],
19 show_unpublished=False,
20 search_title_only=False,
21 extra_filters={},
22 path=None,
23 ):
24
25 # Get default templates
26 if template is None:
27 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):
28 template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE
29 else:
30 template = 'wagtailsearch/search_results.html'
31
32 if template_ajax is None:
33 if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):
34 template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX
35 else:
36 template_ajax = template
37
38 # Get query string and page from GET paramters
39 query_string = request.GET.get('q', '')
40 page = request.GET.get('p', 1)
41
42 # Search
43 if query_string != '':
44 search_results = models.Page.search(
45 query_string,
46 show_unpublished=show_unpublished,
47 search_title_only=search_title_only,
48 extra_filters=extra_filters,
49 path=path if path else request.site.root_page.path
50 )
51
52 # Get query object
53 query = Query.get(query_string)
54
55 # Add hit
56 query.add_hit()
57
58 # Pagination
59 paginator = Paginator(search_results, results_per_page)
60 try:
61 search_results = paginator.page(page)
62 except PageNotAnInteger:
63 search_results = paginator.page(1)
64 except EmptyPage:
65 search_results = paginator.page(paginator.num_pages)
66 else:
67 query = None
68 search_results = None
69
70 if use_json: # Return a json response
71 if search_results:
72 search_results_json = []
73 for result in search_results:
74 result_specific = result.specific
75
76 search_results_json.append(dict(
77 (attr, getattr(result_specific, attr))
78 for attr in json_attrs
79 if hasattr(result_specific, attr)
80 ))
81
82 return HttpResponse(json.dumps(search_results_json))
83 else:
84 return HttpResponse('[]')
85 else: # Render a template
86 if request.is_ajax() and template_ajax:
87 template = template_ajax
88
89 return render(request, template, dict(
90 query_string=query_string,
91 search_results=search_results,
92 is_ajax=request.is_ajax(),
93 query=query
94 ))
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py
--- a/wagtail/wagtailsearch/views/frontend.py
+++ b/wagtail/wagtailsearch/views/frontend.py
@@ -37,7 +37,7 @@
# Get query string and page from GET paramters
query_string = request.GET.get('q', '')
- page = request.GET.get('p', 1)
+ page = request.GET.get('page', request.GET.get('p', 1))
# Search
if query_string != '':
| {"golden_diff": "diff --git a/wagtail/wagtailsearch/views/frontend.py b/wagtail/wagtailsearch/views/frontend.py\n--- a/wagtail/wagtailsearch/views/frontend.py\n+++ b/wagtail/wagtailsearch/views/frontend.py\n@@ -37,7 +37,7 @@\n \n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n- page = request.GET.get('p', 1)\n+ page = request.GET.get('page', request.GET.get('p', 1))\n \n # Search\n if query_string != '':\n", "issue": "Paginator and search pagination expect different parameters for page\nThe Paginator (as in `django.core.paginator`) used pretty much everywhere uses `page` as the query parameter. The search view, however, [expects](https://github.com/torchbox/wagtail/blob/100797796df0bc8ca96035092f32a9275d2b3713/wagtail/wagtailsearch/views/queries.py#L28) a `p` query parameter for pagination.\n\nWhile not a bug, it is a bit confusing and makes it less elegant to share a pagination include. Certainly made me scratch my head.\n\nWorth a PR?\n\nCheers,\nDan\n\n", "before_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore import models\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(\n request,\n template=None,\n template_ajax=None,\n results_per_page=10,\n use_json=False,\n json_attrs=['title', 'url'],\n show_unpublished=False,\n search_title_only=False,\n extra_filters={},\n path=None,\n ):\n\n # Get default templates\n if template is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):\n template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE\n else:\n template = 'wagtailsearch/search_results.html'\n\n if template_ajax is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):\n template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX\n else:\n template_ajax = template\n\n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n page = request.GET.get('p', 1)\n\n # Search\n if query_string != '':\n search_results = models.Page.search(\n query_string,\n show_unpublished=show_unpublished,\n search_title_only=search_title_only,\n extra_filters=extra_filters,\n path=path if path else request.site.root_page.path\n )\n\n # Get query object\n query = Query.get(query_string)\n\n # Add hit\n query.add_hit()\n\n # Pagination\n paginator = Paginator(search_results, results_per_page)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n else:\n query = None\n search_results = None\n\n if use_json: # Return a json response\n if search_results:\n search_results_json = []\n for result in search_results:\n result_specific = result.specific\n\n search_results_json.append(dict(\n (attr, getattr(result_specific, attr))\n for attr in json_attrs\n if hasattr(result_specific, attr)\n ))\n\n return HttpResponse(json.dumps(search_results_json))\n else:\n return HttpResponse('[]')\n else: # Render a template\n if request.is_ajax() and template_ajax:\n template = template_ajax\n\n return render(request, template, dict(\n query_string=query_string,\n search_results=search_results,\n is_ajax=request.is_ajax(),\n query=query\n ))\n", "path": "wagtail/wagtailsearch/views/frontend.py"}], "after_files": [{"content": "import json\n\nfrom django.conf import settings\nfrom 
django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore import models\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(\n request,\n template=None,\n template_ajax=None,\n results_per_page=10,\n use_json=False,\n json_attrs=['title', 'url'],\n show_unpublished=False,\n search_title_only=False,\n extra_filters={},\n path=None,\n ):\n\n # Get default templates\n if template is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE'):\n template = settings.WAGTAILSEARCH_RESULTS_TEMPLATE\n else:\n template = 'wagtailsearch/search_results.html'\n\n if template_ajax is None:\n if hasattr(settings, 'WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX'):\n template_ajax = settings.WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX\n else:\n template_ajax = template\n\n # Get query string and page from GET paramters\n query_string = request.GET.get('q', '')\n page = request.GET.get('page', request.GET.get('p', 1))\n\n # Search\n if query_string != '':\n search_results = models.Page.search(\n query_string,\n show_unpublished=show_unpublished,\n search_title_only=search_title_only,\n extra_filters=extra_filters,\n path=path if path else request.site.root_page.path\n )\n\n # Get query object\n query = Query.get(query_string)\n\n # Add hit\n query.add_hit()\n\n # Pagination\n paginator = Paginator(search_results, results_per_page)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n else:\n query = None\n search_results = None\n\n if use_json: # Return a json response\n if search_results:\n search_results_json = []\n for result in search_results:\n result_specific = result.specific\n\n search_results_json.append(dict(\n (attr, getattr(result_specific, attr))\n for attr in json_attrs\n if hasattr(result_specific, attr)\n ))\n\n return HttpResponse(json.dumps(search_results_json))\n else:\n return HttpResponse('[]')\n else: # Render a template\n if request.is_ajax() and template_ajax:\n template = template_ajax\n\n return render(request, template, dict(\n query_string=query_string,\n search_results=search_results,\n is_ajax=request.is_ajax(),\n query=query\n ))\n", "path": "wagtail/wagtailsearch/views/frontend.py"}]} | 1,166 | 131 |
gh_patches_debug_10307 | rasdani/github-patches | git_diff | getnikola__nikola-2238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deploy crashes with state system
Will investigate later.
``` pytb
Traceback (most recent call last):
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/doit_cmd.py", line 168, in run
return command.parse_execute(args)
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/cmd_base.py", line 122, in parse_execute
return self.execute(params, args)
File "/home/kwpolska/git/nikola/nikola/plugin_categories.py", line 124, in execute
return self._execute(options, args)
File "/home/kwpolska/git/nikola/nikola/plugins/command/deploy.py", line 135, in _execute
self.site.state.set('last_deploy', new_deploy.isoformat())
File "/home/kwpolska/git/nikola/nikola/state.py", line 64, in set
self._save()
File "/home/kwpolska/git/nikola/nikola/state.py", line 82, in _save
json.dump(self._local.data, outf, sort_keys=True, indent=2)
File "/usr/lib64/python3.5/json/__init__.py", line 179, in dump
fp.write(chunk)
File "/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/tempfile.py", line 483, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/state.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Persistent state implementation."""
28
29 import json
30 import os
31 import shutil
32 import tempfile
33 import threading
34
35 from . import utils
36
37
38 class Persistor():
39 """Persist stuff in a place.
40
41 This is an intentionally dumb implementation. It is *not* meant to be
42 fast, or useful for arbitrarily large data. Use lightly.
43
44 Intentionally it has no namespaces, sections, etc. Use as a
45 responsible adult.
46 """
47
48 def __init__(self, path):
49 """Where do you want it persisted."""
50 self._path = path
51 utils.makedirs(os.path.dirname(path))
52 self._local = threading.local()
53 self._local.data = {}
54
55 def get(self, key):
56 """Get data stored in key."""
57 self._read()
58 return self._local.data.get(key)
59
60 def set(self, key, value):
61 """Store value in key."""
62 self._read()
63 self._local.data[key] = value
64 self._save()
65
66 def delete(self, key):
67 """Delete key and the value it contains."""
68 self._read()
69 if key in self._local.data:
70 self._local.data.pop(key)
71 self._save()
72
73 def _read(self):
74 if os.path.isfile(self._path):
75 with open(self._path) as inf:
76 self._local.data = json.load(inf)
77
78 def _save(self):
79 dname = os.path.dirname(self._path)
80 with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:
81 tname = outf.name
82 json.dump(self._local.data, outf, sort_keys=True, indent=2)
83 shutil.move(tname, self._path)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/state.py b/nikola/state.py
--- a/nikola/state.py
+++ b/nikola/state.py
@@ -78,6 +78,11 @@
def _save(self):
dname = os.path.dirname(self._path)
with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:
+ # TODO replace with encoding='utf-8' and mode 'w+' in v8
tname = outf.name
- json.dump(self._local.data, outf, sort_keys=True, indent=2)
+ data = json.dumps(self._local.data, sort_keys=True, indent=2)
+ try:
+ outf.write(data)
+ except TypeError:
+ outf.write(data.encode('utf-8'))
shutil.move(tname, self._path)
| {"golden_diff": "diff --git a/nikola/state.py b/nikola/state.py\n--- a/nikola/state.py\n+++ b/nikola/state.py\n@@ -78,6 +78,11 @@\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n+ # TODO replace with encoding='utf-8' and mode 'w+' in v8\n tname = outf.name\n- json.dump(self._local.data, outf, sort_keys=True, indent=2)\n+ data = json.dumps(self._local.data, sort_keys=True, indent=2)\n+ try:\n+ outf.write(data)\n+ except TypeError:\n+ outf.write(data.encode('utf-8'))\n shutil.move(tname, self._path)\n", "issue": "deploy crashes with state system\nWill investigate later.\n\n``` pytb\nTraceback (most recent call last):\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/doit_cmd.py\", line 168, in run\n return command.parse_execute(args)\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/site-packages/doit/cmd_base.py\", line 122, in parse_execute\n return self.execute(params, args)\n File \"/home/kwpolska/git/nikola/nikola/plugin_categories.py\", line 124, in execute\n return self._execute(options, args)\n File \"/home/kwpolska/git/nikola/nikola/plugins/command/deploy.py\", line 135, in _execute\n self.site.state.set('last_deploy', new_deploy.isoformat())\n File \"/home/kwpolska/git/nikola/nikola/state.py\", line 64, in set\n self._save()\n File \"/home/kwpolska/git/nikola/nikola/state.py\", line 82, in _save\n json.dump(self._local.data, outf, sort_keys=True, indent=2)\n File \"/usr/lib64/python3.5/json/__init__.py\", line 179, in dump\n fp.write(chunk)\n File \"/home/kwpolska/virtualenvs/nikola-py3/lib/python3.5/tempfile.py\", line 483, in func_wrapper\n return func(*args, **kwargs)\nTypeError: a bytes-like object is required, not 'str'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Persistent state implementation.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nfrom . import utils\n\n\nclass Persistor():\n \"\"\"Persist stuff in a place.\n\n This is an intentionally dumb implementation. It is *not* meant to be\n fast, or useful for arbitrarily large data. Use lightly.\n\n Intentionally it has no namespaces, sections, etc. 
Use as a\n responsible adult.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Where do you want it persisted.\"\"\"\n self._path = path\n utils.makedirs(os.path.dirname(path))\n self._local = threading.local()\n self._local.data = {}\n\n def get(self, key):\n \"\"\"Get data stored in key.\"\"\"\n self._read()\n return self._local.data.get(key)\n\n def set(self, key, value):\n \"\"\"Store value in key.\"\"\"\n self._read()\n self._local.data[key] = value\n self._save()\n\n def delete(self, key):\n \"\"\"Delete key and the value it contains.\"\"\"\n self._read()\n if key in self._local.data:\n self._local.data.pop(key)\n self._save()\n\n def _read(self):\n if os.path.isfile(self._path):\n with open(self._path) as inf:\n self._local.data = json.load(inf)\n\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n tname = outf.name\n json.dump(self._local.data, outf, sort_keys=True, indent=2)\n shutil.move(tname, self._path)\n", "path": "nikola/state.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Persistent state implementation.\"\"\"\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport threading\n\nfrom . import utils\n\n\nclass Persistor():\n \"\"\"Persist stuff in a place.\n\n This is an intentionally dumb implementation. It is *not* meant to be\n fast, or useful for arbitrarily large data. Use lightly.\n\n Intentionally it has no namespaces, sections, etc. 
Use as a\n responsible adult.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"Where do you want it persisted.\"\"\"\n self._path = path\n utils.makedirs(os.path.dirname(path))\n self._local = threading.local()\n self._local.data = {}\n\n def get(self, key):\n \"\"\"Get data stored in key.\"\"\"\n self._read()\n return self._local.data.get(key)\n\n def set(self, key, value):\n \"\"\"Store value in key.\"\"\"\n self._read()\n self._local.data[key] = value\n self._save()\n\n def delete(self, key):\n \"\"\"Delete key and the value it contains.\"\"\"\n self._read()\n if key in self._local.data:\n self._local.data.pop(key)\n self._save()\n\n def _read(self):\n if os.path.isfile(self._path):\n with open(self._path) as inf:\n self._local.data = json.load(inf)\n\n def _save(self):\n dname = os.path.dirname(self._path)\n with tempfile.NamedTemporaryFile(dir=dname, delete=False) as outf:\n # TODO replace with encoding='utf-8' and mode 'w+' in v8\n tname = outf.name\n data = json.dumps(self._local.data, sort_keys=True, indent=2)\n try:\n outf.write(data)\n except TypeError:\n outf.write(data.encode('utf-8'))\n shutil.move(tname, self._path)\n", "path": "nikola/state.py"}]} | 1,407 | 180 |
gh_patches_debug_29916 | rasdani/github-patches | git_diff | chainer__chainer-4738 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
F.bilinear requires huge GPU memory
The following code results `cupy.cuda.memory.OutOfMemoryError: out of memory to allocate 18014398509481984 bytes (total 18014399785863168 bytes)`
```
import chainer
import cupy
b = chainer.links.Bilinear(256, 256, 256).to_gpu()
e1 = cupy.random.randn(64, 256).astype('f')
e2 = cupy.random.randn(64, 256).astype('f')
y = b(e1, e2)
print(y)
```
How to fix: merge cupy/cupy#1218 (or do not use `cupy.einsum`).
I confirmed the code run in ~5sec with
- chainer: master(6bab773dec70f291108ab2575622805252f9a208)
- cupy: (Merge: cupy/cupy@6162f9a cupy/cupy@7f89bd0)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/connection/bilinear.py`
Content:
```
1 import numpy
2
3 import chainer
4 from chainer.backends import cuda
5 from chainer import function_node
6 from chainer.utils import type_check
7
8
9 def _as_mat(x):
10 if x.ndim == 2:
11 return x
12 return x.reshape(len(x), -1)
13
14
15 def _ij_ik_il_to_jkl(a, b, c):
16 ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
17 return chainer.functions.matmul(_as_mat(ab).T, c).reshape(
18 a.shape[1], b.shape[1], c.shape[1])
19
20
21 def _ij_ik_jkl_to_il(a, b, c):
22 ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk
23 c = c.reshape(-1, c.shape[-1]) # [jk]l
24 return chainer.functions.matmul(_as_mat(ab), c)
25
26
27 def _ij_il_jkl_to_ik(a, b, c):
28 return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))
29
30
31 def _ik_il_jkl_to_ij(a, b, c):
32 return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))
33
34
35 class BilinearFunction(function_node.FunctionNode):
36 def check_type_forward(self, in_types):
37 n_in = type_check.eval(in_types.size())
38 if n_in != 3 and n_in != 6:
39 raise type_check.InvalidType(
40 '{0} or {1}'.format(
41 in_types.size() == 3, in_types.size() == 6),
42 '{0} == {1}'.format(in_types.size(), n_in))
43
44 e1_type, e2_type, W_type = in_types[:3]
45 type_check_prod = type_check.make_variable(numpy.prod, 'prod')
46 type_check.expect(
47 e1_type.dtype == numpy.float32,
48 e1_type.ndim >= 2,
49 e2_type.dtype == numpy.float32,
50 e2_type.ndim >= 2,
51 e1_type.shape[0] == e2_type.shape[0],
52 W_type.dtype == numpy.float32,
53 W_type.ndim == 3,
54 type_check_prod(e1_type.shape[1:]) == W_type.shape[0],
55 type_check_prod(e2_type.shape[1:]) == W_type.shape[1],
56 )
57
58 if n_in == 6:
59 out_size = W_type.shape[2]
60 V1_type, V2_type, b_type = in_types[3:]
61 type_check.expect(
62 V1_type.dtype == numpy.float32,
63 V1_type.ndim == 2,
64 V1_type.shape[0] == W_type.shape[0],
65 V1_type.shape[1] == out_size,
66 V2_type.dtype == numpy.float32,
67 V2_type.ndim == 2,
68 V2_type.shape[0] == W_type.shape[1],
69 V2_type.shape[1] == out_size,
70 b_type.dtype == numpy.float32,
71 b_type.ndim == 1,
72 b_type.shape[0] == out_size,
73 )
74
75 def forward(self, inputs):
76 self.retain_inputs(tuple(range(len(inputs))))
77
78 e1 = _as_mat(inputs[0])
79 e2 = _as_mat(inputs[1])
80 W = inputs[2]
81
82 xp = cuda.get_array_module(*inputs)
83 y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
84
85 if len(inputs) == 6:
86 V1, V2, b = inputs[3:]
87 y += e1.dot(V1)
88 y += e2.dot(V2)
89 y += b
90 return y,
91
92 def backward(self, indexes, grad_outputs):
93 inputs = self.get_retained_inputs()
94 e1, e2, W = inputs[:3]
95 gy, = grad_outputs
96
97 if len(inputs) == 6:
98 V1, V2 = inputs[3], inputs[4]
99 return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))
100 return BilinearFunctionGrad().apply((e1, e2, W, gy))
101
102
103 class BilinearFunctionGrad(function_node.FunctionNode):
104
105 def forward(self, inputs):
106 self.retain_inputs(tuple(range(len(inputs))))
107
108 e1 = _as_mat(inputs[0])
109 e2 = _as_mat(inputs[1])
110 W, gy = inputs[2], inputs[-1]
111
112 xp = cuda.get_array_module(*inputs)
113 ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
114 ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
115 gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
116
117 ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
118
119 if len(inputs) == 6:
120 V1, V2 = inputs[3], inputs[4]
121 gV1 = e1.T.dot(gy)
122 gV2 = e2.T.dot(gy)
123 gb = gy.sum(0)
124 ge1 += gy.dot(V1.T)
125 ge2 += gy.dot(V2.T)
126 ret += gV1, gV2, gb
127
128 return ret
129
130 def backward(self, indexes, grad_outputs):
131 inputs = self.get_retained_inputs()
132
133 e1 = _as_mat(inputs[0])
134 e2 = _as_mat(inputs[1])
135 W, gy = inputs[2], inputs[-1]
136
137 gge1 = _as_mat(grad_outputs[0])
138 gge2 = _as_mat(grad_outputs[1])
139 ggW = grad_outputs[2]
140
141 dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)
142 dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)
143 dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)
144
145 dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)
146 dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)
147 dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)
148
149 dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)
150 dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)
151 dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)
152
153 ge1 = dgW_de1 + dge2_de1
154 ge2 = dgW_de2 + dge1_de2
155 gW = dge1_dW + dge2_dW
156 ggy = dgW_dgy + dge1_dgy + dge2_dgy
157
158 if len(inputs) == 6:
159 V1, V2 = inputs[3], inputs[4]
160 ggV1, ggV2, ggb = grad_outputs[3:]
161
162 gV1 = chainer.functions.matmul(gge1, gy, transa=True)
163 gV2 = chainer.functions.matmul(gge2, gy, transa=True)
164
165 ge1 += chainer.functions.matmul(gy, ggV1, transb=True)
166 ge2 += chainer.functions.matmul(gy, ggV2, transb=True)
167 ggy += chainer.functions.matmul(gge1, V1)
168 ggy += chainer.functions.matmul(gge2, V2)
169 ggy += chainer.functions.matmul(e1, ggV1)
170 ggy += chainer.functions.matmul(e2, ggV2)
171 ggy += chainer.functions.broadcast_to(ggb, ggy.shape)
172
173 ge1 = ge1.reshape(inputs[0].shape)
174 ge2 = ge2.reshape(inputs[1].shape)
175
176 if len(inputs) == 6:
177 return ge1, ge2, gW, gV1, gV2, ggy
178 return ge1, ge2, gW, ggy
179
180
181 def bilinear(e1, e2, W, V1=None, V2=None, b=None):
182 """Applies a bilinear function based on given parameters.
183
184 This is a building block of Neural Tensor Network (see the reference paper
185 below). It takes two input variables and one or four parameters, and
186 outputs one variable.
187
188 To be precise, denote six input arrays mathematically by
189 :math:`e^1\\in \\mathbb{R}^{I\\cdot J}`,
190 :math:`e^2\\in \\mathbb{R}^{I\\cdot K}`,
191 :math:`W\\in \\mathbb{R}^{J \\cdot K \\cdot L}`,
192 :math:`V^1\\in \\mathbb{R}^{J \\cdot L}`,
193 :math:`V^2\\in \\mathbb{R}^{K \\cdot L}`, and
194 :math:`b\\in \\mathbb{R}^{L}`,
195 where :math:`I` is mini-batch size.
196 In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear
197 parameters.
198
199 The output of forward propagation is calculated as
200
201 .. math::
202
203 y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
204 \\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.
205
206 Note that V1, V2, b are optional. If these are not given, then this
207 function omits the last three terms in the above equation.
208
209 .. note::
210
211 This function accepts an input variable ``e1`` or ``e2`` of a non-matrix
212 array. In this case, the leading dimension is treated as the batch
213 dimension, and the other dimensions are reduced to one dimension.
214
215 .. note::
216
217 In the original paper, :math:`J` and :math:`K`
218 must be equal and the author denotes :math:`[V^1 V^2]`
219 (concatenation of matrices) by :math:`V`.
220
221 Args:
222 e1 (~chainer.Variable): Left input variable.
223 e2 (~chainer.Variable): Right input variable.
224 W (~chainer.Variable): Quadratic weight variable.
225 V1 (~chainer.Variable): Left coefficient variable.
226 V2 (~chainer.Variable): Right coefficient variable.
227 b (~chainer.Variable): Bias variable.
228
229 Returns:
230 ~chainer.Variable: Output variable.
231
232 See:
233 `Reasoning With Neural Tensor Networks for Knowledge Base Completion
234 <https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
235 networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
236
237 """
238 flags = [V1 is None, V2 is None, b is None]
239 if any(flags):
240 if not all(flags):
241 raise ValueError('All coefficients and bias for bilinear() must '
242 'be None, if at least one of them is None.')
243 return BilinearFunction().apply((e1, e2, W))[0]
244 return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/connection/bilinear.py b/chainer/functions/connection/bilinear.py
--- a/chainer/functions/connection/bilinear.py
+++ b/chainer/functions/connection/bilinear.py
@@ -80,7 +80,19 @@
W = inputs[2]
xp = cuda.get_array_module(*inputs)
- y = xp.einsum('ij,ik,jkl->il', e1, e2, W)
+ if xp is numpy:
+ y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
+ else:
+ i_len, j_len = e1.shape
+ k_len = e2.shape[1]
+ # 'ij,ik->ijk'
+ e1e2 = e1[:, :, None] * e2[:, None, :]
+ # ijk->i[jk]
+ e1e2 = e1e2.reshape(i_len, j_len * k_len)
+ # jkl->[jk]l
+ W_mat = W.reshape(-1, W.shape[2])
+ # 'i[jk],[jk]l->il'
+ y = e1e2.dot(W_mat)
if len(inputs) == 6:
V1, V2, b = inputs[3:]
@@ -110,9 +122,23 @@
W, gy = inputs[2], inputs[-1]
xp = cuda.get_array_module(*inputs)
- ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)
- ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)
- gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)
+ if xp is numpy:
+ gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
+ ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
+ ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
+ else:
+ kern = cuda.reduce('T in0, T in1, T in2', 'T out',
+ 'in0 * in1 * in2', 'a + b', 'out = a', 0,
+ 'bilinear_product')
+
+ e1_b = e1[:, :, None, None] # ij
+ e2_b = e2[:, None, :, None] # ik
+ gy_b = gy[:, None, None, :] # il
+ W_b = W[None, :, :, :] # jkl
+
+ gW = kern(e1_b, e2_b, gy_b, axis=0) # 'ij,ik,il->jkl'
+ ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3)) # 'ik,jkl,il->ij'
+ ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3)) # 'ij,jkl,il->ik'
ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
| {"golden_diff": "diff --git a/chainer/functions/connection/bilinear.py b/chainer/functions/connection/bilinear.py\n--- a/chainer/functions/connection/bilinear.py\n+++ b/chainer/functions/connection/bilinear.py\n@@ -80,7 +80,19 @@\n W = inputs[2]\n \n xp = cuda.get_array_module(*inputs)\n- y = xp.einsum('ij,ik,jkl->il', e1, e2, W)\n+ if xp is numpy:\n+ y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)\n+ else:\n+ i_len, j_len = e1.shape\n+ k_len = e2.shape[1]\n+ # 'ij,ik->ijk'\n+ e1e2 = e1[:, :, None] * e2[:, None, :]\n+ # ijk->i[jk]\n+ e1e2 = e1e2.reshape(i_len, j_len * k_len)\n+ # jkl->[jk]l\n+ W_mat = W.reshape(-1, W.shape[2])\n+ # 'i[jk],[jk]l->il'\n+ y = e1e2.dot(W_mat)\n \n if len(inputs) == 6:\n V1, V2, b = inputs[3:]\n@@ -110,9 +122,23 @@\n W, gy = inputs[2], inputs[-1]\n \n xp = cuda.get_array_module(*inputs)\n- ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)\n- ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)\n- gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)\n+ if xp is numpy:\n+ gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)\n+ ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)\n+ ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)\n+ else:\n+ kern = cuda.reduce('T in0, T in1, T in2', 'T out',\n+ 'in0 * in1 * in2', 'a + b', 'out = a', 0,\n+ 'bilinear_product')\n+\n+ e1_b = e1[:, :, None, None] # ij\n+ e2_b = e2[:, None, :, None] # ik\n+ gy_b = gy[:, None, None, :] # il\n+ W_b = W[None, :, :, :] # jkl\n+\n+ gW = kern(e1_b, e2_b, gy_b, axis=0) # 'ij,ik,il->jkl'\n+ ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3)) # 'ik,jkl,il->ij'\n+ ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3)) # 'ij,jkl,il->ik'\n \n ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW\n", "issue": "F.bilinear requires huge GPU memory\nThe following code results `cupy.cuda.memory.OutOfMemoryError: out of memory to allocate 18014398509481984 bytes (total 18014399785863168 bytes)`\r\n```\r\nimport chainer\r\nimport cupy\r\nb = chainer.links.Bilinear(256, 256, 256).to_gpu()\r\ne1 = cupy.random.randn(64, 256).astype('f')\r\ne2 = cupy.random.randn(64, 256).astype('f')\r\ny = b(e1, e2)\r\nprint(y)\r\n```\r\n\r\nHow to fix: merge cupy/cupy#1218 (or do not use `cupy.einsum`).\r\nI confirmed the code run in ~5sec with\r\n- chainer: master(6bab773dec70f291108ab2575622805252f9a208)\r\n- cupy: (Merge: cupy/cupy@6162f9a cupy/cupy@7f89bd0)\n", "before_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\n\ndef _ij_ik_il_to_jkl(a, b, c):\n ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk\n return chainer.functions.matmul(_as_mat(ab).T, c).reshape(\n a.shape[1], b.shape[1], c.shape[1])\n\n\ndef _ij_ik_jkl_to_il(a, b, c):\n ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk\n c = c.reshape(-1, c.shape[-1]) # [jk]l\n return chainer.functions.matmul(_as_mat(ab), c)\n\n\ndef _ij_il_jkl_to_ik(a, b, c):\n return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))\n\n\ndef _ik_il_jkl_to_ij(a, b, c):\n return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))\n\n\nclass BilinearFunction(function_node.FunctionNode):\n def check_type_forward(self, in_types):\n n_in = type_check.eval(in_types.size())\n if n_in != 3 and n_in != 6:\n raise type_check.InvalidType(\n '{0} or {1}'.format(\n in_types.size() == 3, in_types.size() == 6),\n '{0} == {1}'.format(in_types.size(), n_in))\n\n e1_type, e2_type, W_type = in_types[:3]\n 
type_check_prod = type_check.make_variable(numpy.prod, 'prod')\n type_check.expect(\n e1_type.dtype == numpy.float32,\n e1_type.ndim >= 2,\n e2_type.dtype == numpy.float32,\n e2_type.ndim >= 2,\n e1_type.shape[0] == e2_type.shape[0],\n W_type.dtype == numpy.float32,\n W_type.ndim == 3,\n type_check_prod(e1_type.shape[1:]) == W_type.shape[0],\n type_check_prod(e2_type.shape[1:]) == W_type.shape[1],\n )\n\n if n_in == 6:\n out_size = W_type.shape[2]\n V1_type, V2_type, b_type = in_types[3:]\n type_check.expect(\n V1_type.dtype == numpy.float32,\n V1_type.ndim == 2,\n V1_type.shape[0] == W_type.shape[0],\n V1_type.shape[1] == out_size,\n V2_type.dtype == numpy.float32,\n V2_type.ndim == 2,\n V2_type.shape[0] == W_type.shape[1],\n V2_type.shape[1] == out_size,\n b_type.dtype == numpy.float32,\n b_type.ndim == 1,\n b_type.shape[0] == out_size,\n )\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W = inputs[2]\n\n xp = cuda.get_array_module(*inputs)\n y = xp.einsum('ij,ik,jkl->il', e1, e2, W)\n\n if len(inputs) == 6:\n V1, V2, b = inputs[3:]\n y += e1.dot(V1)\n y += e2.dot(V2)\n y += b\n return y,\n\n def backward(self, indexes, grad_outputs):\n inputs = self.get_retained_inputs()\n e1, e2, W = inputs[:3]\n gy, = grad_outputs\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))\n return BilinearFunctionGrad().apply((e1, e2, W, gy))\n\n\nclass BilinearFunctionGrad(function_node.FunctionNode):\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W, gy = inputs[2], inputs[-1]\n\n xp = cuda.get_array_module(*inputs)\n ge1 = xp.einsum('ik,jkl,il->ij', e2, W, gy)\n ge2 = xp.einsum('ij,jkl,il->ik', e1, W, gy)\n gW = xp.einsum('ij,ik,il->jkl', e1, e2, gy)\n\n ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n gV1 = e1.T.dot(gy)\n gV2 = e2.T.dot(gy)\n gb = gy.sum(0)\n ge1 += gy.dot(V1.T)\n ge2 += gy.dot(V2.T)\n ret += gV1, gV2, gb\n\n return ret\n\n def backward(self, indexes, grad_outputs):\n inputs = self.get_retained_inputs()\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W, gy = inputs[2], inputs[-1]\n\n gge1 = _as_mat(grad_outputs[0])\n gge2 = _as_mat(grad_outputs[1])\n ggW = grad_outputs[2]\n\n dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)\n dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)\n dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)\n\n dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)\n dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)\n dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)\n\n dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)\n dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)\n dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)\n\n ge1 = dgW_de1 + dge2_de1\n ge2 = dgW_de2 + dge1_de2\n gW = dge1_dW + dge2_dW\n ggy = dgW_dgy + dge1_dgy + dge2_dgy\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n ggV1, ggV2, ggb = grad_outputs[3:]\n\n gV1 = chainer.functions.matmul(gge1, gy, transa=True)\n gV2 = chainer.functions.matmul(gge2, gy, transa=True)\n\n ge1 += chainer.functions.matmul(gy, ggV1, transb=True)\n ge2 += chainer.functions.matmul(gy, ggV2, transb=True)\n ggy += chainer.functions.matmul(gge1, V1)\n ggy += chainer.functions.matmul(gge2, V2)\n ggy += chainer.functions.matmul(e1, ggV1)\n ggy += chainer.functions.matmul(e2, ggV2)\n ggy += chainer.functions.broadcast_to(ggb, ggy.shape)\n\n ge1 = ge1.reshape(inputs[0].shape)\n ge2 = 
ge2.reshape(inputs[1].shape)\n\n if len(inputs) == 6:\n return ge1, ge2, gW, gV1, gV2, ggy\n return ge1, ge2, gW, ggy\n\n\ndef bilinear(e1, e2, W, V1=None, V2=None, b=None):\n \"\"\"Applies a bilinear function based on given parameters.\n\n This is a building block of Neural Tensor Network (see the reference paper\n below). It takes two input variables and one or four parameters, and\n outputs one variable.\n\n To be precise, denote six input arrays mathematically by\n :math:`e^1\\\\in \\\\mathbb{R}^{I\\\\cdot J}`,\n :math:`e^2\\\\in \\\\mathbb{R}^{I\\\\cdot K}`,\n :math:`W\\\\in \\\\mathbb{R}^{J \\\\cdot K \\\\cdot L}`,\n :math:`V^1\\\\in \\\\mathbb{R}^{J \\\\cdot L}`,\n :math:`V^2\\\\in \\\\mathbb{R}^{K \\\\cdot L}`, and\n :math:`b\\\\in \\\\mathbb{R}^{L}`,\n where :math:`I` is mini-batch size.\n In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear\n parameters.\n\n The output of forward propagation is calculated as\n\n .. math::\n\n y_{il} = \\\\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\\\\n \\\\sum_{j} e^1_{ij} V^1_{jl} + \\\\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.\n\n Note that V1, V2, b are optional. If these are not given, then this\n function omits the last three terms in the above equation.\n\n .. note::\n\n This function accepts an input variable ``e1`` or ``e2`` of a non-matrix\n array. In this case, the leading dimension is treated as the batch\n dimension, and the other dimensions are reduced to one dimension.\n\n .. note::\n\n In the original paper, :math:`J` and :math:`K`\n must be equal and the author denotes :math:`[V^1 V^2]`\n (concatenation of matrices) by :math:`V`.\n\n Args:\n e1 (~chainer.Variable): Left input variable.\n e2 (~chainer.Variable): Right input variable.\n W (~chainer.Variable): Quadratic weight variable.\n V1 (~chainer.Variable): Left coefficient variable.\n V2 (~chainer.Variable): Right coefficient variable.\n b (~chainer.Variable): Bias variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n See:\n `Reasoning With Neural Tensor Networks for Knowledge Base Completion\n <https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-\n networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].\n\n \"\"\"\n flags = [V1 is None, V2 is None, b is None]\n if any(flags):\n if not all(flags):\n raise ValueError('All coefficients and bias for bilinear() must '\n 'be None, if at least one of them is None.')\n return BilinearFunction().apply((e1, e2, W))[0]\n return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]\n", "path": "chainer/functions/connection/bilinear.py"}], "after_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\ndef _as_mat(x):\n if x.ndim == 2:\n return x\n return x.reshape(len(x), -1)\n\n\ndef _ij_ik_il_to_jkl(a, b, c):\n ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk\n return chainer.functions.matmul(_as_mat(ab).T, c).reshape(\n a.shape[1], b.shape[1], c.shape[1])\n\n\ndef _ij_ik_jkl_to_il(a, b, c):\n ab = chainer.functions.matmul(a[:, :, None], b[:, None, :]) # ijk\n c = c.reshape(-1, c.shape[-1]) # [jk]l\n return chainer.functions.matmul(_as_mat(ab), c)\n\n\ndef _ij_il_jkl_to_ik(a, b, c):\n return _ij_ik_jkl_to_il(a, b, chainer.functions.swapaxes(c, 1, 2))\n\n\ndef _ik_il_jkl_to_ij(a, b, c):\n return _ij_ik_jkl_to_il(a, b, chainer.functions.rollaxis(c, 0, c.ndim))\n\n\nclass BilinearFunction(function_node.FunctionNode):\n def check_type_forward(self, in_types):\n n_in = 
type_check.eval(in_types.size())\n if n_in != 3 and n_in != 6:\n raise type_check.InvalidType(\n '{0} or {1}'.format(\n in_types.size() == 3, in_types.size() == 6),\n '{0} == {1}'.format(in_types.size(), n_in))\n\n e1_type, e2_type, W_type = in_types[:3]\n type_check_prod = type_check.make_variable(numpy.prod, 'prod')\n type_check.expect(\n e1_type.dtype == numpy.float32,\n e1_type.ndim >= 2,\n e2_type.dtype == numpy.float32,\n e2_type.ndim >= 2,\n e1_type.shape[0] == e2_type.shape[0],\n W_type.dtype == numpy.float32,\n W_type.ndim == 3,\n type_check_prod(e1_type.shape[1:]) == W_type.shape[0],\n type_check_prod(e2_type.shape[1:]) == W_type.shape[1],\n )\n\n if n_in == 6:\n out_size = W_type.shape[2]\n V1_type, V2_type, b_type = in_types[3:]\n type_check.expect(\n V1_type.dtype == numpy.float32,\n V1_type.ndim == 2,\n V1_type.shape[0] == W_type.shape[0],\n V1_type.shape[1] == out_size,\n V2_type.dtype == numpy.float32,\n V2_type.ndim == 2,\n V2_type.shape[0] == W_type.shape[1],\n V2_type.shape[1] == out_size,\n b_type.dtype == numpy.float32,\n b_type.ndim == 1,\n b_type.shape[0] == out_size,\n )\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W = inputs[2]\n\n xp = cuda.get_array_module(*inputs)\n if xp is numpy:\n y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)\n else:\n i_len, j_len = e1.shape\n k_len = e2.shape[1]\n # 'ij,ik->ijk'\n e1e2 = e1[:, :, None] * e2[:, None, :]\n # ijk->i[jk]\n e1e2 = e1e2.reshape(i_len, j_len * k_len)\n # jkl->[jk]l\n W_mat = W.reshape(-1, W.shape[2])\n # 'i[jk],[jk]l->il'\n y = e1e2.dot(W_mat)\n\n if len(inputs) == 6:\n V1, V2, b = inputs[3:]\n y += e1.dot(V1)\n y += e2.dot(V2)\n y += b\n return y,\n\n def backward(self, indexes, grad_outputs):\n inputs = self.get_retained_inputs()\n e1, e2, W = inputs[:3]\n gy, = grad_outputs\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n return BilinearFunctionGrad().apply((e1, e2, W, V1, V2, gy))\n return BilinearFunctionGrad().apply((e1, e2, W, gy))\n\n\nclass BilinearFunctionGrad(function_node.FunctionNode):\n\n def forward(self, inputs):\n self.retain_inputs(tuple(range(len(inputs))))\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W, gy = inputs[2], inputs[-1]\n\n xp = cuda.get_array_module(*inputs)\n if xp is numpy:\n gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)\n ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)\n ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)\n else:\n kern = cuda.reduce('T in0, T in1, T in2', 'T out',\n 'in0 * in1 * in2', 'a + b', 'out = a', 0,\n 'bilinear_product')\n\n e1_b = e1[:, :, None, None] # ij\n e2_b = e2[:, None, :, None] # ik\n gy_b = gy[:, None, None, :] # il\n W_b = W[None, :, :, :] # jkl\n\n gW = kern(e1_b, e2_b, gy_b, axis=0) # 'ij,ik,il->jkl'\n ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3)) # 'ik,jkl,il->ij'\n ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3)) # 'ij,jkl,il->ik'\n\n ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n gV1 = e1.T.dot(gy)\n gV2 = e2.T.dot(gy)\n gb = gy.sum(0)\n ge1 += gy.dot(V1.T)\n ge2 += gy.dot(V2.T)\n ret += gV1, gV2, gb\n\n return ret\n\n def backward(self, indexes, grad_outputs):\n inputs = self.get_retained_inputs()\n\n e1 = _as_mat(inputs[0])\n e2 = _as_mat(inputs[1])\n W, gy = inputs[2], inputs[-1]\n\n gge1 = _as_mat(grad_outputs[0])\n gge2 = _as_mat(grad_outputs[1])\n ggW = grad_outputs[2]\n\n dge1_de2 = _ij_il_jkl_to_ik(gge1, gy, W)\n dge1_dW = _ij_ik_il_to_jkl(gge1, e2, gy)\n 
dge1_dgy = _ij_ik_jkl_to_il(gge1, e2, W)\n\n dge2_de1 = _ik_il_jkl_to_ij(gge2, gy, W)\n dge2_dW = _ij_ik_il_to_jkl(e1, gge2, gy)\n dge2_dgy = _ij_ik_jkl_to_il(e1, gge2, W)\n\n dgW_de1 = _ik_il_jkl_to_ij(e2, gy, ggW)\n dgW_de2 = _ij_il_jkl_to_ik(e1, gy, ggW)\n dgW_dgy = _ij_ik_jkl_to_il(e1, e2, ggW)\n\n ge1 = dgW_de1 + dge2_de1\n ge2 = dgW_de2 + dge1_de2\n gW = dge1_dW + dge2_dW\n ggy = dgW_dgy + dge1_dgy + dge2_dgy\n\n if len(inputs) == 6:\n V1, V2 = inputs[3], inputs[4]\n ggV1, ggV2, ggb = grad_outputs[3:]\n\n gV1 = chainer.functions.matmul(gge1, gy, transa=True)\n gV2 = chainer.functions.matmul(gge2, gy, transa=True)\n\n ge1 += chainer.functions.matmul(gy, ggV1, transb=True)\n ge2 += chainer.functions.matmul(gy, ggV2, transb=True)\n ggy += chainer.functions.matmul(gge1, V1)\n ggy += chainer.functions.matmul(gge2, V2)\n ggy += chainer.functions.matmul(e1, ggV1)\n ggy += chainer.functions.matmul(e2, ggV2)\n ggy += chainer.functions.broadcast_to(ggb, ggy.shape)\n\n ge1 = ge1.reshape(inputs[0].shape)\n ge2 = ge2.reshape(inputs[1].shape)\n\n if len(inputs) == 6:\n return ge1, ge2, gW, gV1, gV2, ggy\n return ge1, ge2, gW, ggy\n\n\ndef bilinear(e1, e2, W, V1=None, V2=None, b=None):\n \"\"\"Applies a bilinear function based on given parameters.\n\n This is a building block of Neural Tensor Network (see the reference paper\n below). It takes two input variables and one or four parameters, and\n outputs one variable.\n\n To be precise, denote six input arrays mathematically by\n :math:`e^1\\\\in \\\\mathbb{R}^{I\\\\cdot J}`,\n :math:`e^2\\\\in \\\\mathbb{R}^{I\\\\cdot K}`,\n :math:`W\\\\in \\\\mathbb{R}^{J \\\\cdot K \\\\cdot L}`,\n :math:`V^1\\\\in \\\\mathbb{R}^{J \\\\cdot L}`,\n :math:`V^2\\\\in \\\\mathbb{R}^{K \\\\cdot L}`, and\n :math:`b\\\\in \\\\mathbb{R}^{L}`,\n where :math:`I` is mini-batch size.\n In this document, we call :math:`V^1`, :math:`V^2`, and :math:`b` linear\n parameters.\n\n The output of forward propagation is calculated as\n\n .. math::\n\n y_{il} = \\\\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\\\\n \\\\sum_{j} e^1_{ij} V^1_{jl} + \\\\sum_{k} e^2_{ik} V^2_{kl} + b_{l}.\n\n Note that V1, V2, b are optional. If these are not given, then this\n function omits the last three terms in the above equation.\n\n .. note::\n\n This function accepts an input variable ``e1`` or ``e2`` of a non-matrix\n array. In this case, the leading dimension is treated as the batch\n dimension, and the other dimensions are reduced to one dimension.\n\n .. 
note::\n\n In the original paper, :math:`J` and :math:`K`\n must be equal and the author denotes :math:`[V^1 V^2]`\n (concatenation of matrices) by :math:`V`.\n\n Args:\n e1 (~chainer.Variable): Left input variable.\n e2 (~chainer.Variable): Right input variable.\n W (~chainer.Variable): Quadratic weight variable.\n V1 (~chainer.Variable): Left coefficient variable.\n V2 (~chainer.Variable): Right coefficient variable.\n b (~chainer.Variable): Bias variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n See:\n `Reasoning With Neural Tensor Networks for Knowledge Base Completion\n <https://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-\n networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].\n\n \"\"\"\n flags = [V1 is None, V2 is None, b is None]\n if any(flags):\n if not all(flags):\n raise ValueError('All coefficients and bias for bilinear() must '\n 'be None, if at least one of them is None.')\n return BilinearFunction().apply((e1, e2, W))[0]\n return BilinearFunction().apply((e1, e2, W, V1, V2, b))[0]\n", "path": "chainer/functions/connection/bilinear.py"}]} | 3,764 | 746 |
gh_patches_debug_40992 | rasdani/github-patches | git_diff | modin-project__modin-2701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement usecols parameter for read_csv with OmniSci backend
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. What kind of performance improvements would you like to see with this new API?
According to pyarrow documentation `pyarrow.read_csv` supports `include_columns` (https://arrow.apache.org/docs/python/generated/pyarrow.csv.ConvertOptions.html#pyarrow.csv.ConvertOptions), it can be used for implementation of `usecols` parameter of `modin.read_csv` with OmniSci backend.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/experimental/engines/omnisci_on_ray/io.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler
15 from modin.engines.ray.generic.io import RayIO
16 from modin.experimental.engines.omnisci_on_ray.frame.data import OmnisciOnRayFrame
17 from modin.error_message import ErrorMessage
18
19 from pyarrow.csv import read_csv, ParseOptions, ConvertOptions, ReadOptions
20 import pyarrow as pa
21
22
23 class OmnisciOnRayIO(RayIO):
24
25 frame_cls = OmnisciOnRayFrame
26 query_compiler_cls = DFAlgQueryCompiler
27
28 arg_keys = [
29 "filepath_or_buffer",
30 "sep",
31 "delimiter",
32 "header",
33 "names",
34 "index_col",
35 "usecols",
36 "squeeze",
37 "prefix",
38 "mangle_dupe_cols",
39 "dtype",
40 "engine",
41 "converters",
42 "true_values",
43 "false_values",
44 "skipinitialspace",
45 "skiprows",
46 "nrows",
47 "na_values",
48 "keep_default_na",
49 "na_filter",
50 "verbose",
51 "skip_blank_lines",
52 "parse_dates",
53 "infer_datetime_format",
54 "keep_date_col",
55 "date_parser",
56 "dayfirst",
57 "cache_dates",
58 "iterator",
59 "chunksize",
60 "compression",
61 "thousands",
62 "decimal",
63 "lineterminator",
64 "quotechar",
65 "quoting",
66 "escapechar",
67 "comment",
68 "encoding",
69 "dialect",
70 "error_bad_lines",
71 "warn_bad_lines",
72 "skipfooter",
73 "doublequote",
74 "delim_whitespace",
75 "low_memory",
76 "memory_map",
77 "float_precision",
78 ]
79
80 @classmethod
81 def read_csv(
82 cls,
83 filepath_or_buffer,
84 sep=",",
85 delimiter=None,
86 header="infer",
87 names=None,
88 index_col=None,
89 usecols=None,
90 squeeze=False,
91 prefix=None,
92 mangle_dupe_cols=True,
93 dtype=None,
94 engine=None,
95 converters=None,
96 true_values=None,
97 false_values=None,
98 skipinitialspace=False,
99 skiprows=None,
100 nrows=None,
101 na_values=None,
102 keep_default_na=True,
103 na_filter=True,
104 verbose=False,
105 skip_blank_lines=True,
106 parse_dates=False,
107 infer_datetime_format=False,
108 keep_date_col=False,
109 date_parser=None,
110 dayfirst=False,
111 cache_dates=True,
112 iterator=False,
113 chunksize=None,
114 compression="infer",
115 thousands=None,
116 decimal=b".",
117 lineterminator=None,
118 quotechar='"',
119 quoting=0,
120 escapechar=None,
121 comment=None,
122 encoding=None,
123 dialect=None,
124 error_bad_lines=True,
125 warn_bad_lines=True,
126 skipfooter=0,
127 doublequote=True,
128 delim_whitespace=False,
129 low_memory=True,
130 memory_map=False,
131 float_precision=None,
132 storage_options=None,
133 ):
134 items = locals().copy()
135 mykwargs = {k: items[k] for k in items if k in cls.arg_keys}
136 eng = str(engine).lower().strip()
137 try:
138 if eng in ["pandas", "c"]:
139 return cls._read(**mykwargs)
140
141 if isinstance(dtype, dict):
142 column_types = {c: cls._dtype_to_arrow(t) for c, t in dtype.items()}
143 else:
144 column_types = cls._dtype_to_arrow(dtype)
145
146 if (type(parse_dates) is list) and type(column_types) is dict:
147 for c in parse_dates:
148 column_types[c] = pa.timestamp("s")
149
150 if names:
151 if header == 0:
152 skiprows = skiprows + 1 if skiprows is not None else 1
153 elif header is None or header == "infer":
154 pass
155 else:
156 raise NotImplementedError(
157 "read_csv with 'arrow' engine and provided 'names' parameter supports only 0, None and 'infer' header values"
158 )
159 else:
160 if header == 0 or header == "infer":
161 pass
162 else:
163 raise NotImplementedError(
164 "read_csv with 'arrow' engine without 'names' parameter provided supports only 0 and 'infer' header values"
165 )
166
167 if delimiter is None:
168 delimiter = sep
169
170 if delim_whitespace and delimiter != ",":
171 raise ValueError(
172 "Specified a delimiter and delim_whitespace=True; you can only specify one."
173 )
174
175 po = ParseOptions(
176 delimiter="\\s+" if delim_whitespace else delimiter,
177 quote_char=quotechar,
178 double_quote=doublequote,
179 escape_char=escapechar,
180 newlines_in_values=False,
181 ignore_empty_lines=skip_blank_lines,
182 )
183 co = ConvertOptions(
184 check_utf8=None,
185 column_types=column_types,
186 null_values=None,
187 true_values=None,
188 false_values=None,
189 # timestamp fields should be handled as strings if parse_dates
190 # didn't passed explicitly as an array or a dict
191 timestamp_parsers=[""] if isinstance(parse_dates, bool) else None,
192 strings_can_be_null=None,
193 include_columns=None,
194 include_missing_columns=None,
195 auto_dict_encode=None,
196 auto_dict_max_cardinality=None,
197 )
198 ro = ReadOptions(
199 use_threads=True,
200 block_size=None,
201 skip_rows=skiprows,
202 column_names=names,
203 autogenerate_column_names=None,
204 )
205
206 at = read_csv(
207 filepath_or_buffer,
208 read_options=ro,
209 parse_options=po,
210 convert_options=co,
211 )
212
213 return cls.from_arrow(at)
214 except (pa.ArrowNotImplementedError, NotImplementedError):
215 if eng in ["arrow"]:
216 raise
217
218 ErrorMessage.default_to_pandas("`read_csv`")
219 return cls._read(**mykwargs)
220
221 @classmethod
222 def _dtype_to_arrow(cls, dtype):
223 if dtype is None:
224 return None
225 tname = dtype if isinstance(dtype, str) else dtype.name
226 if tname == "category":
227 return pa.dictionary(index_type=pa.int32(), value_type=pa.string())
228 elif tname == "string":
229 return pa.string()
230 else:
231 return pa.from_numpy_dtype(tname)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/experimental/engines/omnisci_on_ray/io.py b/modin/experimental/engines/omnisci_on_ray/io.py
--- a/modin/experimental/engines/omnisci_on_ray/io.py
+++ b/modin/experimental/engines/omnisci_on_ray/io.py
@@ -19,6 +19,9 @@
from pyarrow.csv import read_csv, ParseOptions, ConvertOptions, ReadOptions
import pyarrow as pa
+import pandas
+from pandas.io.parsers import _validate_usecols_arg
+
class OmnisciOnRayIO(RayIO):
@@ -172,6 +175,8 @@
"Specified a delimiter and delim_whitespace=True; you can only specify one."
)
+ usecols_md = cls._prepare_pyarrow_usecols(mykwargs)
+
po = ParseOptions(
delimiter="\\s+" if delim_whitespace else delimiter,
quote_char=quotechar,
@@ -190,7 +195,7 @@
# didn't passed explicitly as an array or a dict
timestamp_parsers=[""] if isinstance(parse_dates, bool) else None,
strings_can_be_null=None,
- include_columns=None,
+ include_columns=usecols_md,
include_missing_columns=None,
auto_dict_encode=None,
auto_dict_max_cardinality=None,
@@ -229,3 +234,57 @@
return pa.string()
else:
return pa.from_numpy_dtype(tname)
+
+ @classmethod
+ def _prepare_pyarrow_usecols(cls, read_csv_kwargs):
+ """
+ Define `usecols` parameter in the way pyarrow can process it.
+ ----------
+ read_csv_kwargs:
+ read_csv function parameters.
+
+ Returns
+ -------
+ usecols_md: list
+ Redefined `usecols` parameter.
+ """
+ usecols = read_csv_kwargs.get("usecols", None)
+ engine = read_csv_kwargs.get("engine", None)
+ usecols_md, usecols_names_dtypes = _validate_usecols_arg(usecols)
+ if usecols_md:
+ empty_pd_df = pandas.read_csv(
+ **dict(
+ read_csv_kwargs,
+ nrows=0,
+ skipfooter=0,
+ usecols=None,
+ engine=None if engine == "arrow" else engine,
+ )
+ )
+ column_names = empty_pd_df.columns
+ if usecols_names_dtypes == "string":
+ if usecols_md.issubset(set(column_names)):
+ # columns should be sorted because pandas doesn't preserve columns order
+ usecols_md = [
+ col_name for col_name in column_names if col_name in usecols_md
+ ]
+ else:
+ raise NotImplementedError(
+ "values passed in the `usecols` parameter don't match columns names"
+ )
+ elif usecols_names_dtypes == "integer":
+ # columns should be sorted because pandas doesn't preserve columns order
+ usecols_md = sorted(usecols_md)
+ if len(column_names) < usecols_md[-1]:
+ raise NotImplementedError(
+ "max usecols value is higher than the number of columns"
+ )
+ usecols_md = [column_names[i] for i in usecols_md]
+ elif callable(usecols_md):
+ usecols_md = [
+ col_name for col_name in column_names if usecols_md(col_name)
+ ]
+ else:
+ raise NotImplementedError("unsupported `usecols` parameter")
+
+ return usecols_md
| {"golden_diff": "diff --git a/modin/experimental/engines/omnisci_on_ray/io.py b/modin/experimental/engines/omnisci_on_ray/io.py\n--- a/modin/experimental/engines/omnisci_on_ray/io.py\n+++ b/modin/experimental/engines/omnisci_on_ray/io.py\n@@ -19,6 +19,9 @@\n from pyarrow.csv import read_csv, ParseOptions, ConvertOptions, ReadOptions\n import pyarrow as pa\n \n+import pandas\n+from pandas.io.parsers import _validate_usecols_arg\n+\n \n class OmnisciOnRayIO(RayIO):\n \n@@ -172,6 +175,8 @@\n \"Specified a delimiter and delim_whitespace=True; you can only specify one.\"\n )\n \n+ usecols_md = cls._prepare_pyarrow_usecols(mykwargs)\n+\n po = ParseOptions(\n delimiter=\"\\\\s+\" if delim_whitespace else delimiter,\n quote_char=quotechar,\n@@ -190,7 +195,7 @@\n # didn't passed explicitly as an array or a dict\n timestamp_parsers=[\"\"] if isinstance(parse_dates, bool) else None,\n strings_can_be_null=None,\n- include_columns=None,\n+ include_columns=usecols_md,\n include_missing_columns=None,\n auto_dict_encode=None,\n auto_dict_max_cardinality=None,\n@@ -229,3 +234,57 @@\n return pa.string()\n else:\n return pa.from_numpy_dtype(tname)\n+\n+ @classmethod\n+ def _prepare_pyarrow_usecols(cls, read_csv_kwargs):\n+ \"\"\"\n+ Define `usecols` parameter in the way pyarrow can process it.\n+ ----------\n+ read_csv_kwargs:\n+ read_csv function parameters.\n+\n+ Returns\n+ -------\n+ usecols_md: list\n+ Redefined `usecols` parameter.\n+ \"\"\"\n+ usecols = read_csv_kwargs.get(\"usecols\", None)\n+ engine = read_csv_kwargs.get(\"engine\", None)\n+ usecols_md, usecols_names_dtypes = _validate_usecols_arg(usecols)\n+ if usecols_md:\n+ empty_pd_df = pandas.read_csv(\n+ **dict(\n+ read_csv_kwargs,\n+ nrows=0,\n+ skipfooter=0,\n+ usecols=None,\n+ engine=None if engine == \"arrow\" else engine,\n+ )\n+ )\n+ column_names = empty_pd_df.columns\n+ if usecols_names_dtypes == \"string\":\n+ if usecols_md.issubset(set(column_names)):\n+ # columns should be sorted because pandas doesn't preserve columns order\n+ usecols_md = [\n+ col_name for col_name in column_names if col_name in usecols_md\n+ ]\n+ else:\n+ raise NotImplementedError(\n+ \"values passed in the `usecols` parameter don't match columns names\"\n+ )\n+ elif usecols_names_dtypes == \"integer\":\n+ # columns should be sorted because pandas doesn't preserve columns order\n+ usecols_md = sorted(usecols_md)\n+ if len(column_names) < usecols_md[-1]:\n+ raise NotImplementedError(\n+ \"max usecols value is higher than the number of columns\"\n+ )\n+ usecols_md = [column_names[i] for i in usecols_md]\n+ elif callable(usecols_md):\n+ usecols_md = [\n+ col_name for col_name in column_names if usecols_md(col_name)\n+ ]\n+ else:\n+ raise NotImplementedError(\"unsupported `usecols` parameter\")\n+\n+ return usecols_md\n", "issue": "Implement usecols parameter for read_csv with OmniSci backend\n**Is your feature request related to a problem? Please describe.**\r\nA clear and concise description of what the problem is. 
What kind of performance improvements would you like to see with this new API?\r\nAccording to pyarrow documentation `pyarrow.read_csv` supports `include_columns` (https://arrow.apache.org/docs/python/generated/pyarrow.csv.ConvertOptions.html#pyarrow.csv.ConvertOptions), it can be used for implementation of `usecols` parameter of `modin.read_csv` with OmniSci backend.\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler\nfrom modin.engines.ray.generic.io import RayIO\nfrom modin.experimental.engines.omnisci_on_ray.frame.data import OmnisciOnRayFrame\nfrom modin.error_message import ErrorMessage\n\nfrom pyarrow.csv import read_csv, ParseOptions, ConvertOptions, ReadOptions\nimport pyarrow as pa\n\n\nclass OmnisciOnRayIO(RayIO):\n\n frame_cls = OmnisciOnRayFrame\n query_compiler_cls = DFAlgQueryCompiler\n\n arg_keys = [\n \"filepath_or_buffer\",\n \"sep\",\n \"delimiter\",\n \"header\",\n \"names\",\n \"index_col\",\n \"usecols\",\n \"squeeze\",\n \"prefix\",\n \"mangle_dupe_cols\",\n \"dtype\",\n \"engine\",\n \"converters\",\n \"true_values\",\n \"false_values\",\n \"skipinitialspace\",\n \"skiprows\",\n \"nrows\",\n \"na_values\",\n \"keep_default_na\",\n \"na_filter\",\n \"verbose\",\n \"skip_blank_lines\",\n \"parse_dates\",\n \"infer_datetime_format\",\n \"keep_date_col\",\n \"date_parser\",\n \"dayfirst\",\n \"cache_dates\",\n \"iterator\",\n \"chunksize\",\n \"compression\",\n \"thousands\",\n \"decimal\",\n \"lineterminator\",\n \"quotechar\",\n \"quoting\",\n \"escapechar\",\n \"comment\",\n \"encoding\",\n \"dialect\",\n \"error_bad_lines\",\n \"warn_bad_lines\",\n \"skipfooter\",\n \"doublequote\",\n \"delim_whitespace\",\n \"low_memory\",\n \"memory_map\",\n \"float_precision\",\n ]\n\n @classmethod\n def read_csv(\n cls,\n filepath_or_buffer,\n sep=\",\",\n delimiter=None,\n header=\"infer\",\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n date_parser=None,\n dayfirst=False,\n cache_dates=True,\n iterator=False,\n chunksize=None,\n compression=\"infer\",\n thousands=None,\n decimal=b\".\",\n lineterminator=None,\n quotechar='\"',\n quoting=0,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n error_bad_lines=True,\n warn_bad_lines=True,\n skipfooter=0,\n doublequote=True,\n delim_whitespace=False,\n low_memory=True,\n memory_map=False,\n 
float_precision=None,\n storage_options=None,\n ):\n items = locals().copy()\n mykwargs = {k: items[k] for k in items if k in cls.arg_keys}\n eng = str(engine).lower().strip()\n try:\n if eng in [\"pandas\", \"c\"]:\n return cls._read(**mykwargs)\n\n if isinstance(dtype, dict):\n column_types = {c: cls._dtype_to_arrow(t) for c, t in dtype.items()}\n else:\n column_types = cls._dtype_to_arrow(dtype)\n\n if (type(parse_dates) is list) and type(column_types) is dict:\n for c in parse_dates:\n column_types[c] = pa.timestamp(\"s\")\n\n if names:\n if header == 0:\n skiprows = skiprows + 1 if skiprows is not None else 1\n elif header is None or header == \"infer\":\n pass\n else:\n raise NotImplementedError(\n \"read_csv with 'arrow' engine and provided 'names' parameter supports only 0, None and 'infer' header values\"\n )\n else:\n if header == 0 or header == \"infer\":\n pass\n else:\n raise NotImplementedError(\n \"read_csv with 'arrow' engine without 'names' parameter provided supports only 0 and 'infer' header values\"\n )\n\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter != \",\":\n raise ValueError(\n \"Specified a delimiter and delim_whitespace=True; you can only specify one.\"\n )\n\n po = ParseOptions(\n delimiter=\"\\\\s+\" if delim_whitespace else delimiter,\n quote_char=quotechar,\n double_quote=doublequote,\n escape_char=escapechar,\n newlines_in_values=False,\n ignore_empty_lines=skip_blank_lines,\n )\n co = ConvertOptions(\n check_utf8=None,\n column_types=column_types,\n null_values=None,\n true_values=None,\n false_values=None,\n # timestamp fields should be handled as strings if parse_dates\n # didn't passed explicitly as an array or a dict\n timestamp_parsers=[\"\"] if isinstance(parse_dates, bool) else None,\n strings_can_be_null=None,\n include_columns=None,\n include_missing_columns=None,\n auto_dict_encode=None,\n auto_dict_max_cardinality=None,\n )\n ro = ReadOptions(\n use_threads=True,\n block_size=None,\n skip_rows=skiprows,\n column_names=names,\n autogenerate_column_names=None,\n )\n\n at = read_csv(\n filepath_or_buffer,\n read_options=ro,\n parse_options=po,\n convert_options=co,\n )\n\n return cls.from_arrow(at)\n except (pa.ArrowNotImplementedError, NotImplementedError):\n if eng in [\"arrow\"]:\n raise\n\n ErrorMessage.default_to_pandas(\"`read_csv`\")\n return cls._read(**mykwargs)\n\n @classmethod\n def _dtype_to_arrow(cls, dtype):\n if dtype is None:\n return None\n tname = dtype if isinstance(dtype, str) else dtype.name\n if tname == \"category\":\n return pa.dictionary(index_type=pa.int32(), value_type=pa.string())\n elif tname == \"string\":\n return pa.string()\n else:\n return pa.from_numpy_dtype(tname)\n", "path": "modin/experimental/engines/omnisci_on_ray/io.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom modin.experimental.backends.omnisci.query_compiler import DFAlgQueryCompiler\nfrom modin.engines.ray.generic.io import RayIO\nfrom modin.experimental.engines.omnisci_on_ray.frame.data import OmnisciOnRayFrame\nfrom modin.error_message import ErrorMessage\n\nfrom pyarrow.csv import read_csv, ParseOptions, ConvertOptions, ReadOptions\nimport pyarrow as pa\n\nimport pandas\nfrom pandas.io.parsers import _validate_usecols_arg\n\n\nclass OmnisciOnRayIO(RayIO):\n\n frame_cls = OmnisciOnRayFrame\n query_compiler_cls = DFAlgQueryCompiler\n\n arg_keys = [\n \"filepath_or_buffer\",\n \"sep\",\n \"delimiter\",\n \"header\",\n \"names\",\n \"index_col\",\n \"usecols\",\n \"squeeze\",\n \"prefix\",\n \"mangle_dupe_cols\",\n \"dtype\",\n \"engine\",\n \"converters\",\n \"true_values\",\n \"false_values\",\n \"skipinitialspace\",\n \"skiprows\",\n \"nrows\",\n \"na_values\",\n \"keep_default_na\",\n \"na_filter\",\n \"verbose\",\n \"skip_blank_lines\",\n \"parse_dates\",\n \"infer_datetime_format\",\n \"keep_date_col\",\n \"date_parser\",\n \"dayfirst\",\n \"cache_dates\",\n \"iterator\",\n \"chunksize\",\n \"compression\",\n \"thousands\",\n \"decimal\",\n \"lineterminator\",\n \"quotechar\",\n \"quoting\",\n \"escapechar\",\n \"comment\",\n \"encoding\",\n \"dialect\",\n \"error_bad_lines\",\n \"warn_bad_lines\",\n \"skipfooter\",\n \"doublequote\",\n \"delim_whitespace\",\n \"low_memory\",\n \"memory_map\",\n \"float_precision\",\n ]\n\n @classmethod\n def read_csv(\n cls,\n filepath_or_buffer,\n sep=\",\",\n delimiter=None,\n header=\"infer\",\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n date_parser=None,\n dayfirst=False,\n cache_dates=True,\n iterator=False,\n chunksize=None,\n compression=\"infer\",\n thousands=None,\n decimal=b\".\",\n lineterminator=None,\n quotechar='\"',\n quoting=0,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n error_bad_lines=True,\n warn_bad_lines=True,\n skipfooter=0,\n doublequote=True,\n delim_whitespace=False,\n low_memory=True,\n memory_map=False,\n float_precision=None,\n storage_options=None,\n ):\n items = locals().copy()\n mykwargs = {k: items[k] for k in items if k in cls.arg_keys}\n eng = str(engine).lower().strip()\n try:\n if eng in [\"pandas\", \"c\"]:\n return cls._read(**mykwargs)\n\n if isinstance(dtype, dict):\n column_types = {c: cls._dtype_to_arrow(t) for c, t in dtype.items()}\n else:\n column_types = cls._dtype_to_arrow(dtype)\n\n if (type(parse_dates) is list) and type(column_types) is dict:\n for c in parse_dates:\n column_types[c] = pa.timestamp(\"s\")\n\n if names:\n if header == 0:\n skiprows = skiprows + 1 if skiprows is not None else 1\n elif header is None or header == \"infer\":\n pass\n else:\n raise NotImplementedError(\n \"read_csv with 'arrow' engine and provided 'names' parameter supports only 0, None and 'infer' header values\"\n )\n else:\n if header == 0 or header == \"infer\":\n pass\n else:\n raise NotImplementedError(\n \"read_csv with 'arrow' engine without 'names' parameter provided supports only 0 and 'infer' header 
values\"\n )\n\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter != \",\":\n raise ValueError(\n \"Specified a delimiter and delim_whitespace=True; you can only specify one.\"\n )\n\n usecols_md = cls._prepare_pyarrow_usecols(mykwargs)\n\n po = ParseOptions(\n delimiter=\"\\\\s+\" if delim_whitespace else delimiter,\n quote_char=quotechar,\n double_quote=doublequote,\n escape_char=escapechar,\n newlines_in_values=False,\n ignore_empty_lines=skip_blank_lines,\n )\n co = ConvertOptions(\n check_utf8=None,\n column_types=column_types,\n null_values=None,\n true_values=None,\n false_values=None,\n # timestamp fields should be handled as strings if parse_dates\n # didn't passed explicitly as an array or a dict\n timestamp_parsers=[\"\"] if isinstance(parse_dates, bool) else None,\n strings_can_be_null=None,\n include_columns=usecols_md,\n include_missing_columns=None,\n auto_dict_encode=None,\n auto_dict_max_cardinality=None,\n )\n ro = ReadOptions(\n use_threads=True,\n block_size=None,\n skip_rows=skiprows,\n column_names=names,\n autogenerate_column_names=None,\n )\n\n at = read_csv(\n filepath_or_buffer,\n read_options=ro,\n parse_options=po,\n convert_options=co,\n )\n\n return cls.from_arrow(at)\n except (pa.ArrowNotImplementedError, NotImplementedError):\n if eng in [\"arrow\"]:\n raise\n\n ErrorMessage.default_to_pandas(\"`read_csv`\")\n return cls._read(**mykwargs)\n\n @classmethod\n def _dtype_to_arrow(cls, dtype):\n if dtype is None:\n return None\n tname = dtype if isinstance(dtype, str) else dtype.name\n if tname == \"category\":\n return pa.dictionary(index_type=pa.int32(), value_type=pa.string())\n elif tname == \"string\":\n return pa.string()\n else:\n return pa.from_numpy_dtype(tname)\n\n @classmethod\n def _prepare_pyarrow_usecols(cls, read_csv_kwargs):\n \"\"\"\n Define `usecols` parameter in the way pyarrow can process it.\n ----------\n read_csv_kwargs:\n read_csv function parameters.\n\n Returns\n -------\n usecols_md: list\n Redefined `usecols` parameter.\n \"\"\"\n usecols = read_csv_kwargs.get(\"usecols\", None)\n engine = read_csv_kwargs.get(\"engine\", None)\n usecols_md, usecols_names_dtypes = _validate_usecols_arg(usecols)\n if usecols_md:\n empty_pd_df = pandas.read_csv(\n **dict(\n read_csv_kwargs,\n nrows=0,\n skipfooter=0,\n usecols=None,\n engine=None if engine == \"arrow\" else engine,\n )\n )\n column_names = empty_pd_df.columns\n if usecols_names_dtypes == \"string\":\n if usecols_md.issubset(set(column_names)):\n # columns should be sorted because pandas doesn't preserve columns order\n usecols_md = [\n col_name for col_name in column_names if col_name in usecols_md\n ]\n else:\n raise NotImplementedError(\n \"values passed in the `usecols` parameter don't match columns names\"\n )\n elif usecols_names_dtypes == \"integer\":\n # columns should be sorted because pandas doesn't preserve columns order\n usecols_md = sorted(usecols_md)\n if len(column_names) < usecols_md[-1]:\n raise NotImplementedError(\n \"max usecols value is higher than the number of columns\"\n )\n usecols_md = [column_names[i] for i in usecols_md]\n elif callable(usecols_md):\n usecols_md = [\n col_name for col_name in column_names if usecols_md(col_name)\n ]\n else:\n raise NotImplementedError(\"unsupported `usecols` parameter\")\n\n return usecols_md\n", "path": "modin/experimental/engines/omnisci_on_ray/io.py"}]} | 2,469 | 790 |
gh_patches_debug_21053 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API: filter notifications by `is_read`
**Description of the need**
For the needs of the extension, it would be useful to be able to filter notifications (URL `/api/notifications`) by their `is_read` property, so as to retrieve only the unread ones (the others being of no interest for this use case).
**Description of the solution**
Add a filter for `is_read` (boolean) on the `/api/notifications` URL
**Description of alternatives**
Being able to sort by this property (to get the unread ones first), _at the very least_.
**Additional context**
See the code at [notifier.js#64](https://github.com/zestedesavoir/extensions-notificateurs/blob/master/Universal/notifier.js#L64) for the use case in question (which would let me remove the `.filter()` on line 78 while still retrieving potentially old but unread notifications that are currently inaccessible).
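As a rough illustration of the requested behaviour, a minimal sketch of such a filter inside the `get_queryset` of the notification list view (shown in the file listing below) might look like this; the string-based handling of the query parameter is an assumption for illustration only:
```python
# Sketch only: narrow the queryset when an `is_read` query parameter
# is provided ("true"/"false"); leave it untouched otherwise.
def get_queryset(self):
    queryset = Notification.objects.get_notifications_of(self.request.user)
    is_read = str(self.request.query_params.get("is_read", None)).lower()
    if is_read == "true":
        queryset = queryset.filter(is_read=True)
    elif is_read == "false":
        queryset = queryset.filter(is_read=False)
    return queryset
```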
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/notification/api/views.py`
Content:
```
1 import datetime
2 from django.core.cache import cache
3 from django.db.models.signals import post_delete
4 from django.db.models.signals import post_save
5 from dry_rest_permissions.generics import DRYPermissions
6 from rest_framework import filters
7 from rest_framework.generics import ListAPIView
8 from rest_framework.permissions import IsAuthenticated
9 from rest_framework_extensions.cache.decorators import cache_response
10 from rest_framework_extensions.etag.decorators import etag
11 from rest_framework_extensions.key_constructor import bits
12 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
13
14 from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit
15 from zds.notification.api.serializers import NotificationSerializer
16 from zds.notification.models import Notification
17
18
19 class PagingNotificationListKeyConstructor(DefaultKeyConstructor):
20 pagination = DJRF3xPaginationKeyBit()
21 search = bits.QueryParamsKeyBit(["search", "ordering", "type"])
22 list_sql_query = bits.ListSqlQueryKeyBit()
23 unique_view_id = bits.UniqueViewIdKeyBit()
24 user = bits.UserKeyBit()
25 updated_at = UpdatedAtKeyBit("api_updated_notification")
26
27
28 def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):
29 cache.set("api_updated_notification", datetime.datetime.utcnow())
30
31
32 post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)
33 post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)
34
35
36 class NotificationListAPI(ListAPIView):
37 """
38 List of notification.
39 """
40
41 filter_backends = (filters.SearchFilter, filters.OrderingFilter)
42 search_fields = ("title",)
43 ordering_fields = (
44 "pubdate",
45 "title",
46 )
47 list_key_func = PagingNotificationListKeyConstructor()
48 serializer_class = NotificationSerializer
49 permission_classes = (
50 IsAuthenticated,
51 DRYPermissions,
52 )
53
54 @etag(list_key_func)
55 @cache_response(key_func=list_key_func)
56 def get(self, request, *args, **kwargs):
57 """
58 Lists all notifications of a user.
59 ---
60
61 parameters:
62 - name: Authorization
63 description: Bearer token to make an authenticated request.
64 required: true
65 paramType: header
66 - name: page
67 description: Restricts output to the given page number.
68 required: false
69 paramType: query
70 - name: page_size
71 description: Sets the number of notifications per page.
72 required: false
73 paramType: query
74 - name: search
75 description: Filters by title.
76 required: false
77 paramType: query
78 - name: ordering
79 description: Sorts the results. You can order by (-)pubdate or (-)title.
80 paramType: query
81 - name: type
82 description: Filters by notification type.
83 paramType: query
84 - name: subscription_type
85 description: Filters by subscription type.
86 paramType: query
87 - name: expand
88 description: Returns an object instead of an identifier representing the given field.
89 required: false
90 paramType: query
91 responseMessages:
92 - code: 401
93 message: Not Authenticated
94 - code: 404
95 message: Not Found
96 """
97 return self.list(request, *args, **kwargs)
98
99 def get_queryset(self):
100 queryset = Notification.objects.get_notifications_of(self.request.user)
101 subscription_type = self.request.query_params.get("subscription_type", None)
102 if subscription_type:
103 queryset = queryset.filter(subscription__content_type__model=subscription_type)
104 _type = self.request.query_params.get("type", None)
105 if _type:
106 queryset = queryset.filter(content_type__model=_type)
107 return queryset
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py
--- a/zds/notification/api/views.py
+++ b/zds/notification/api/views.py
@@ -84,6 +84,9 @@
- name: subscription_type
description: Filters by subscription type.
paramType: query
+ - name: is_read
+ description: Filters by read status.
+ paramType: query
- name: expand
description: Returns an object instead of an identifier representing the given field.
required: false
@@ -104,4 +107,9 @@
_type = self.request.query_params.get("type", None)
if _type:
queryset = queryset.filter(content_type__model=_type)
+ is_read = str(self.request.query_params.get("is_read", None)).lower()
+ if is_read == "true":
+ queryset = queryset.filter(is_read=True)
+ elif is_read == "false":
+ queryset = queryset.filter(is_read=False)
return queryset
| {"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -84,6 +84,9 @@\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n+ - name: is_read\n+ description: Filters by read status.\n+ paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n@@ -104,4 +107,9 @@\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n+ is_read = str(self.request.query_params.get(\"is_read\", None)).lower()\n+ if is_read == \"true\":\n+ queryset = queryset.filter(is_read=True)\n+ elif is_read == \"false\":\n+ queryset = queryset.filter(is_read=False)\n return queryset\n", "issue": "API: filtrer les notifications par `is_read`\n**Description du besoin**\r\n\r\nPour les besoins de l'extension, il serait int\u00e9ressant de pouvoir filtrer les notifications (URL `/api/notifications`) selon leur propri\u00e9t\u00e9 `is_read` pour ne r\u00e9cup\u00e9rer que les non lues (les autres n'ayant pas d'int\u00e9r\u00eat pour ce cas d'usage).\r\n\r\n**Description de la solution**\r\n\r\nAjouter un filtre pour `is_read` (bool\u00e9en) sur l'URL `/api/notifications`\r\n\r\n**Description des alternatives**\r\n\r\nPouvoir trier selon cette propri\u00e9t\u00e9 (pour avoir les non-lues d'abord), _a minima_.\r\n\r\n**Contexte additionnel**\r\n\r\nVoir le code de [notifier.js#64](https://github.com/zestedesavoir/extensions-notificateurs/blob/master/Universal/notifier.js#L64) pour voir le cas d'usage en question (qui me permettrait de supprimer le `.filter()` ligne 78 tout en r\u00e9cup\u00e9rant des notifications potentiellement anciennes mais non lues qui sont actuellement inaccessibles).\r\n\n", "before_files": [{"content": "import datetime\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit([\"search\", \"ordering\", \"type\"])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n updated_at = UpdatedAtKeyBit(\"api_updated_notification\")\n\n\ndef change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n cache.set(\"api_updated_notification\", datetime.datetime.utcnow())\n\n\npost_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\npost_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = 
(filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"title\",)\n ordering_fields = (\n \"pubdate\",\n \"title\",\n )\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (\n IsAuthenticated,\n DRYPermissions,\n )\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get(\"subscription_type\", None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}], "after_files": [{"content": "import datetime\nfrom django.core.cache import cache\nfrom django.db.models.signals import post_delete\nfrom django.db.models.signals import post_save\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit([\"search\", \"ordering\", \"type\"])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n updated_at = UpdatedAtKeyBit(\"api_updated_notification\")\n\n\ndef change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n cache.set(\"api_updated_notification\", datetime.datetime.utcnow())\n\n\npost_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\npost_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n\n\nclass 
NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"title\",)\n ordering_fields = (\n \"pubdate\",\n \"title\",\n )\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (\n IsAuthenticated,\n DRYPermissions,\n )\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: is_read\n description: Filters by read status.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get(\"subscription_type\", None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get(\"type\", None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n is_read = str(self.request.query_params.get(\"is_read\", None)).lower()\n if is_read == \"true\":\n queryset = queryset.filter(is_read=True)\n elif is_read == \"false\":\n queryset = queryset.filter(is_read=False)\n return queryset\n", "path": "zds/notification/api/views.py"}]} | 1,503 | 225 |
gh_patches_debug_16446 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle print() statements for inline scripts
print() statements in inline scripts should be suppressed and converted into ctx.log.warn() calls instead.
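One way to do this, shown here only as a minimal sketch (the class and function names are illustrative), is to wrap script execution in `contextlib.redirect_stdout` with a small file-like object that forwards writes to a log callable:
```python
import contextlib


class StreamLog:
    """File-like object that forwards write() calls to a log callable."""

    def __init__(self, log):
        self.log = log

    def write(self, buf):
        # print() also writes bare newlines; only forward real content.
        if buf.strip():
            self.log(buf)


def call_with_warn_logging(func, warn):
    # Any print() executed inside func() is routed to warn() instead of stdout.
    with contextlib.redirect_stdout(StreamLog(warn)):
        return func()
```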
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/script.py`
Content:
```
1 import contextlib
2 import os
3 import shlex
4 import sys
5 import threading
6 import traceback
7 import types
8
9 from mitmproxy import exceptions
10 from mitmproxy import ctx
11 from mitmproxy import eventsequence
12
13
14 import watchdog.events
15 from watchdog.observers import polling
16
17
18 def parse_command(command):
19 """
20 Returns a (path, args) tuple.
21 """
22 if not command or not command.strip():
23 raise ValueError("Empty script command.")
24 # Windows: escape all backslashes in the path.
25 if os.name == "nt": # pragma: no cover
26 backslashes = shlex.split(command, posix=False)[0].count("\\")
27 command = command.replace("\\", "\\\\", backslashes)
28 args = shlex.split(command) # pragma: no cover
29 args[0] = os.path.expanduser(args[0])
30 if not os.path.exists(args[0]):
31 raise ValueError(
32 ("Script file not found: %s.\r\n"
33 "If your script path contains spaces, "
34 "make sure to wrap it in additional quotes, e.g. -s \"'./foo bar/baz.py' --args\".") %
35 args[0])
36 elif os.path.isdir(args[0]):
37 raise ValueError("Not a file: %s" % args[0])
38 return args[0], args[1:]
39
40
41 def cut_traceback(tb, func_name):
42 """
43 Cut off a traceback at the function with the given name.
44 The func_name's frame is excluded.
45
46 Args:
47 tb: traceback object, as returned by sys.exc_info()[2]
48 func_name: function name
49
50 Returns:
51 Reduced traceback.
52 """
53 tb_orig = tb
54
55 for _, _, fname, _ in traceback.extract_tb(tb):
56 tb = tb.tb_next
57 if fname == func_name:
58 break
59
60 if tb is None:
61 # We could not find the method, take the full stack trace.
62 # This may happen on some Python interpreters/flavors (e.g. PyInstaller).
63 return tb_orig
64 else:
65 return tb
66
67
68 @contextlib.contextmanager
69 def scriptenv(path, args):
70 oldargs = sys.argv
71 sys.argv = [path] + args
72 script_dir = os.path.dirname(os.path.abspath(path))
73 sys.path.append(script_dir)
74 try:
75 yield
76 except SystemExit as v:
77 ctx.log.error("Script exited with code %s" % v.code)
78 except Exception:
79 etype, value, tb = sys.exc_info()
80 tb = cut_traceback(tb, "scriptenv").tb_next
81 ctx.log.error(
82 "Script error: %s" % "".join(
83 traceback.format_exception(etype, value, tb)
84 )
85 )
86 finally:
87 sys.argv = oldargs
88 sys.path.pop()
89
90
91 def load_script(path, args):
92 with open(path, "rb") as f:
93 try:
94 code = compile(f.read(), path, 'exec')
95 except SyntaxError as e:
96 ctx.log.error(
97 "Script error: %s line %s: %s" % (
98 e.filename, e.lineno, e.msg
99 )
100 )
101 return
102 ns = {'__file__': os.path.abspath(path)}
103 with scriptenv(path, args):
104 exec(code, ns)
105 return types.SimpleNamespace(**ns)
106
107
108 class ReloadHandler(watchdog.events.FileSystemEventHandler):
109 def __init__(self, callback):
110 self.callback = callback
111
112 def filter(self, event):
113 """
114 Returns True only when .py file is changed
115 """
116 if event.is_directory:
117 return False
118 if os.path.basename(event.src_path).startswith("."):
119 return False
120 if event.src_path.endswith(".py"):
121 return True
122 return False
123
124 def on_modified(self, event):
125 if self.filter(event):
126 self.callback()
127
128 def on_created(self, event):
129 if self.filter(event):
130 self.callback()
131
132
133 class Script:
134 """
135 An addon that manages a single script.
136 """
137 def __init__(self, command):
138 self.name = command
139
140 self.command = command
141 self.path, self.args = parse_command(command)
142 self.ns = None
143 self.observer = None
144 self.dead = False
145
146 self.last_options = None
147 self.should_reload = threading.Event()
148
149 for i in eventsequence.Events:
150 if not hasattr(self, i):
151 def mkprox():
152 evt = i
153
154 def prox(*args, **kwargs):
155 self.run(evt, *args, **kwargs)
156 return prox
157 setattr(self, i, mkprox())
158
159 def run(self, name, *args, **kwargs):
160 # It's possible for ns to be un-initialised if we failed during
161 # configure
162 if self.ns is not None and not self.dead:
163 func = getattr(self.ns, name, None)
164 if func:
165 with scriptenv(self.path, self.args):
166 return func(*args, **kwargs)
167
168 def reload(self):
169 self.should_reload.set()
170
171 def load_script(self):
172 self.ns = load_script(self.path, self.args)
173 ret = self.run("start", self.last_options)
174 if ret:
175 self.ns = ret
176 self.run("start", self.last_options)
177
178 def tick(self):
179 if self.should_reload.is_set():
180 self.should_reload.clear()
181 ctx.log.info("Reloading script: %s" % self.name)
182 self.ns = load_script(self.path, self.args)
183 self.start(self.last_options)
184 self.configure(self.last_options, self.last_options.keys())
185 else:
186 self.run("tick")
187
188 def start(self, opts):
189 self.last_options = opts
190 self.load_script()
191
192 def configure(self, options, updated):
193 self.last_options = options
194 if not self.observer:
195 self.observer = polling.PollingObserver()
196 # Bind the handler to the real underlying master object
197 self.observer.schedule(
198 ReloadHandler(self.reload),
199 os.path.dirname(self.path) or "."
200 )
201 self.observer.start()
202 self.run("configure", options, updated)
203
204 def done(self):
205 self.run("done")
206 self.dead = True
207
208
209 class ScriptLoader:
210 """
211 An addon that manages loading scripts from options.
212 """
213 def __init__(self):
214 self.is_running = False
215
216 def running(self):
217 self.is_running = True
218
219 def run_once(self, command, flows):
220 try:
221 sc = Script(command)
222 except ValueError as e:
223 raise ValueError(str(e))
224 sc.load_script()
225 for f in flows:
226 for evt, o in eventsequence.iterate(f):
227 sc.run(evt, o)
228 sc.done()
229 return sc
230
231 def configure(self, options, updated):
232 if "scripts" in updated:
233 for s in options.scripts:
234 if options.scripts.count(s) > 1:
235 raise exceptions.OptionsError("Duplicate script: %s" % s)
236
237 for a in ctx.master.addons.chain[:]:
238 if isinstance(a, Script) and a.name not in options.scripts:
239 ctx.log.info("Un-loading script: %s" % a.name)
240 ctx.master.addons.remove(a)
241
242 # The machinations below are to ensure that:
243 # - Scripts remain in the same order
244 # - Scripts are listed directly after the script addon. This is
245 # needed to ensure that interactions with, for instance, flow
246 # serialization remains correct.
247 # - Scripts are not initialized un-necessarily. If only a
248 # script's order in the script list has changed, it should simply
249 # be moved.
250
251 current = {}
252 for a in ctx.master.addons.chain[:]:
253 if isinstance(a, Script):
254 current[a.name] = a
255 ctx.master.addons.chain.remove(a)
256
257 ordered = []
258 newscripts = []
259 for s in options.scripts:
260 if s in current:
261 ordered.append(current[s])
262 else:
263 ctx.log.info("Loading script: %s" % s)
264 try:
265 sc = Script(s)
266 except ValueError as e:
267 raise exceptions.OptionsError(str(e))
268 ordered.append(sc)
269 newscripts.append(sc)
270
271 ochain = ctx.master.addons.chain
272 pos = ochain.index(self)
273 ctx.master.addons.chain = ochain[:pos + 1] + ordered + ochain[pos + 1:]
274
275 for s in newscripts:
276 ctx.master.addons.invoke_addon(s, "start", options)
277 if self.is_running:
278 # If we're already running, we configure and tell the addon
279 # we're up and running.
280 ctx.master.addons.invoke_addon(
281 s, "configure", options, options.keys()
282 )
283 ctx.master.addons.invoke_addon(s, "running")
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -65,14 +65,28 @@
return tb
+class StreamLog:
+ """
+ A class for redirecting output using contextlib.
+ """
+ def __init__(self, log):
+ self.log = log
+
+ def write(self, buf):
+ if buf.strip():
+ self.log(buf)
+
+
@contextlib.contextmanager
def scriptenv(path, args):
oldargs = sys.argv
sys.argv = [path] + args
script_dir = os.path.dirname(os.path.abspath(path))
sys.path.append(script_dir)
+ stdout_replacement = StreamLog(ctx.log.warn)
try:
- yield
+ with contextlib.redirect_stdout(stdout_replacement):
+ yield
except SystemExit as v:
ctx.log.error("Script exited with code %s" % v.code)
except Exception:
| {"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -65,14 +65,28 @@\n return tb\n \n \n+class StreamLog:\n+ \"\"\"\n+ A class for redirecting output using contextlib.\n+ \"\"\"\n+ def __init__(self, log):\n+ self.log = log\n+\n+ def write(self, buf):\n+ if buf.strip():\n+ self.log(buf)\n+\n+\n @contextlib.contextmanager\n def scriptenv(path, args):\n oldargs = sys.argv\n sys.argv = [path] + args\n script_dir = os.path.dirname(os.path.abspath(path))\n sys.path.append(script_dir)\n+ stdout_replacement = StreamLog(ctx.log.warn)\n try:\n- yield\n+ with contextlib.redirect_stdout(stdout_replacement):\n+ yield\n except SystemExit as v:\n ctx.log.error(\"Script exited with code %s\" % v.code)\n except Exception:\n", "issue": "Handle print() statements for inline scripts\nprint() statements in inline scripts should be suppressed, and produce into ctx.log.warn() calls instead. \n\n", "before_files": [{"content": "import contextlib\nimport os\nimport shlex\nimport sys\nimport threading\nimport traceback\nimport types\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import eventsequence\n\n\nimport watchdog.events\nfrom watchdog.observers import polling\n\n\ndef parse_command(command):\n \"\"\"\n Returns a (path, args) tuple.\n \"\"\"\n if not command or not command.strip():\n raise ValueError(\"Empty script command.\")\n # Windows: escape all backslashes in the path.\n if os.name == \"nt\": # pragma: no cover\n backslashes = shlex.split(command, posix=False)[0].count(\"\\\\\")\n command = command.replace(\"\\\\\", \"\\\\\\\\\", backslashes)\n args = shlex.split(command) # pragma: no cover\n args[0] = os.path.expanduser(args[0])\n if not os.path.exists(args[0]):\n raise ValueError(\n (\"Script file not found: %s.\\r\\n\"\n \"If your script path contains spaces, \"\n \"make sure to wrap it in additional quotes, e.g. -s \\\"'./foo bar/baz.py' --args\\\".\") %\n args[0])\n elif os.path.isdir(args[0]):\n raise ValueError(\"Not a file: %s\" % args[0])\n return args[0], args[1:]\n\n\ndef cut_traceback(tb, func_name):\n \"\"\"\n Cut off a traceback at the function with the given name.\n The func_name's frame is excluded.\n\n Args:\n tb: traceback object, as returned by sys.exc_info()[2]\n func_name: function name\n\n Returns:\n Reduced traceback.\n \"\"\"\n tb_orig = tb\n\n for _, _, fname, _ in traceback.extract_tb(tb):\n tb = tb.tb_next\n if fname == func_name:\n break\n\n if tb is None:\n # We could not find the method, take the full stack trace.\n # This may happen on some Python interpreters/flavors (e.g. 
PyInstaller).\n return tb_orig\n else:\n return tb\n\n\[email protected]\ndef scriptenv(path, args):\n oldargs = sys.argv\n sys.argv = [path] + args\n script_dir = os.path.dirname(os.path.abspath(path))\n sys.path.append(script_dir)\n try:\n yield\n except SystemExit as v:\n ctx.log.error(\"Script exited with code %s\" % v.code)\n except Exception:\n etype, value, tb = sys.exc_info()\n tb = cut_traceback(tb, \"scriptenv\").tb_next\n ctx.log.error(\n \"Script error: %s\" % \"\".join(\n traceback.format_exception(etype, value, tb)\n )\n )\n finally:\n sys.argv = oldargs\n sys.path.pop()\n\n\ndef load_script(path, args):\n with open(path, \"rb\") as f:\n try:\n code = compile(f.read(), path, 'exec')\n except SyntaxError as e:\n ctx.log.error(\n \"Script error: %s line %s: %s\" % (\n e.filename, e.lineno, e.msg\n )\n )\n return\n ns = {'__file__': os.path.abspath(path)}\n with scriptenv(path, args):\n exec(code, ns)\n return types.SimpleNamespace(**ns)\n\n\nclass ReloadHandler(watchdog.events.FileSystemEventHandler):\n def __init__(self, callback):\n self.callback = callback\n\n def filter(self, event):\n \"\"\"\n Returns True only when .py file is changed\n \"\"\"\n if event.is_directory:\n return False\n if os.path.basename(event.src_path).startswith(\".\"):\n return False\n if event.src_path.endswith(\".py\"):\n return True\n return False\n\n def on_modified(self, event):\n if self.filter(event):\n self.callback()\n\n def on_created(self, event):\n if self.filter(event):\n self.callback()\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n def __init__(self, command):\n self.name = command\n\n self.command = command\n self.path, self.args = parse_command(command)\n self.ns = None\n self.observer = None\n self.dead = False\n\n self.last_options = None\n self.should_reload = threading.Event()\n\n for i in eventsequence.Events:\n if not hasattr(self, i):\n def mkprox():\n evt = i\n\n def prox(*args, **kwargs):\n self.run(evt, *args, **kwargs)\n return prox\n setattr(self, i, mkprox())\n\n def run(self, name, *args, **kwargs):\n # It's possible for ns to be un-initialised if we failed during\n # configure\n if self.ns is not None and not self.dead:\n func = getattr(self.ns, name, None)\n if func:\n with scriptenv(self.path, self.args):\n return func(*args, **kwargs)\n\n def reload(self):\n self.should_reload.set()\n\n def load_script(self):\n self.ns = load_script(self.path, self.args)\n ret = self.run(\"start\", self.last_options)\n if ret:\n self.ns = ret\n self.run(\"start\", self.last_options)\n\n def tick(self):\n if self.should_reload.is_set():\n self.should_reload.clear()\n ctx.log.info(\"Reloading script: %s\" % self.name)\n self.ns = load_script(self.path, self.args)\n self.start(self.last_options)\n self.configure(self.last_options, self.last_options.keys())\n else:\n self.run(\"tick\")\n\n def start(self, opts):\n self.last_options = opts\n self.load_script()\n\n def configure(self, options, updated):\n self.last_options = options\n if not self.observer:\n self.observer = polling.PollingObserver()\n # Bind the handler to the real underlying master object\n self.observer.schedule(\n ReloadHandler(self.reload),\n os.path.dirname(self.path) or \".\"\n )\n self.observer.start()\n self.run(\"configure\", options, updated)\n\n def done(self):\n self.run(\"done\")\n self.dead = True\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n\n def running(self):\n 
self.is_running = True\n\n def run_once(self, command, flows):\n try:\n sc = Script(command)\n except ValueError as e:\n raise ValueError(str(e))\n sc.load_script()\n for f in flows:\n for evt, o in eventsequence.iterate(f):\n sc.run(evt, o)\n sc.done()\n return sc\n\n def configure(self, options, updated):\n if \"scripts\" in updated:\n for s in options.scripts:\n if options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script: %s\" % s)\n\n for a in ctx.master.addons.chain[:]:\n if isinstance(a, Script) and a.name not in options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are listed directly after the script addon. This is\n # needed to ensure that interactions with, for instance, flow\n # serialization remains correct.\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it should simply\n # be moved.\n\n current = {}\n for a in ctx.master.addons.chain[:]:\n if isinstance(a, Script):\n current[a.name] = a\n ctx.master.addons.chain.remove(a)\n\n ordered = []\n newscripts = []\n for s in options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n ctx.log.info(\"Loading script: %s\" % s)\n try:\n sc = Script(s)\n except ValueError as e:\n raise exceptions.OptionsError(str(e))\n ordered.append(sc)\n newscripts.append(sc)\n\n ochain = ctx.master.addons.chain\n pos = ochain.index(self)\n ctx.master.addons.chain = ochain[:pos + 1] + ordered + ochain[pos + 1:]\n\n for s in newscripts:\n ctx.master.addons.invoke_addon(s, \"start\", options)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(\n s, \"configure\", options, options.keys()\n )\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}], "after_files": [{"content": "import contextlib\nimport os\nimport shlex\nimport sys\nimport threading\nimport traceback\nimport types\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import eventsequence\n\n\nimport watchdog.events\nfrom watchdog.observers import polling\n\n\ndef parse_command(command):\n \"\"\"\n Returns a (path, args) tuple.\n \"\"\"\n if not command or not command.strip():\n raise ValueError(\"Empty script command.\")\n # Windows: escape all backslashes in the path.\n if os.name == \"nt\": # pragma: no cover\n backslashes = shlex.split(command, posix=False)[0].count(\"\\\\\")\n command = command.replace(\"\\\\\", \"\\\\\\\\\", backslashes)\n args = shlex.split(command) # pragma: no cover\n args[0] = os.path.expanduser(args[0])\n if not os.path.exists(args[0]):\n raise ValueError(\n (\"Script file not found: %s.\\r\\n\"\n \"If your script path contains spaces, \"\n \"make sure to wrap it in additional quotes, e.g. 
-s \\\"'./foo bar/baz.py' --args\\\".\") %\n args[0])\n elif os.path.isdir(args[0]):\n raise ValueError(\"Not a file: %s\" % args[0])\n return args[0], args[1:]\n\n\ndef cut_traceback(tb, func_name):\n \"\"\"\n Cut off a traceback at the function with the given name.\n The func_name's frame is excluded.\n\n Args:\n tb: traceback object, as returned by sys.exc_info()[2]\n func_name: function name\n\n Returns:\n Reduced traceback.\n \"\"\"\n tb_orig = tb\n\n for _, _, fname, _ in traceback.extract_tb(tb):\n tb = tb.tb_next\n if fname == func_name:\n break\n\n if tb is None:\n # We could not find the method, take the full stack trace.\n # This may happen on some Python interpreters/flavors (e.g. PyInstaller).\n return tb_orig\n else:\n return tb\n\n\nclass StreamLog:\n \"\"\"\n A class for redirecting output using contextlib.\n \"\"\"\n def __init__(self, log):\n self.log = log\n\n def write(self, buf):\n if buf.strip():\n self.log(buf)\n\n\[email protected]\ndef scriptenv(path, args):\n oldargs = sys.argv\n sys.argv = [path] + args\n script_dir = os.path.dirname(os.path.abspath(path))\n sys.path.append(script_dir)\n stdout_replacement = StreamLog(ctx.log.warn)\n try:\n with contextlib.redirect_stdout(stdout_replacement):\n yield\n except SystemExit as v:\n ctx.log.error(\"Script exited with code %s\" % v.code)\n except Exception:\n etype, value, tb = sys.exc_info()\n tb = cut_traceback(tb, \"scriptenv\").tb_next\n ctx.log.error(\n \"Script error: %s\" % \"\".join(\n traceback.format_exception(etype, value, tb)\n )\n )\n finally:\n sys.argv = oldargs\n sys.path.pop()\n\n\ndef load_script(path, args):\n with open(path, \"rb\") as f:\n try:\n code = compile(f.read(), path, 'exec')\n except SyntaxError as e:\n ctx.log.error(\n \"Script error: %s line %s: %s\" % (\n e.filename, e.lineno, e.msg\n )\n )\n return\n ns = {'__file__': os.path.abspath(path)}\n with scriptenv(path, args):\n exec(code, ns)\n return types.SimpleNamespace(**ns)\n\n\nclass ReloadHandler(watchdog.events.FileSystemEventHandler):\n def __init__(self, callback):\n self.callback = callback\n\n def filter(self, event):\n \"\"\"\n Returns True only when .py file is changed\n \"\"\"\n if event.is_directory:\n return False\n if os.path.basename(event.src_path).startswith(\".\"):\n return False\n if event.src_path.endswith(\".py\"):\n return True\n return False\n\n def on_modified(self, event):\n if self.filter(event):\n self.callback()\n\n def on_created(self, event):\n if self.filter(event):\n self.callback()\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n def __init__(self, command):\n self.name = command\n\n self.command = command\n self.path, self.args = parse_command(command)\n self.ns = None\n self.observer = None\n self.dead = False\n\n self.last_options = None\n self.should_reload = threading.Event()\n\n for i in eventsequence.Events:\n if not hasattr(self, i):\n def mkprox():\n evt = i\n\n def prox(*args, **kwargs):\n self.run(evt, *args, **kwargs)\n return prox\n setattr(self, i, mkprox())\n\n def run(self, name, *args, **kwargs):\n # It's possible for ns to be un-initialised if we failed during\n # configure\n if self.ns is not None and not self.dead:\n func = getattr(self.ns, name, None)\n if func:\n with scriptenv(self.path, self.args):\n return func(*args, **kwargs)\n\n def reload(self):\n self.should_reload.set()\n\n def load_script(self):\n self.ns = load_script(self.path, self.args)\n ret = self.run(\"start\", self.last_options)\n if ret:\n self.ns = ret\n self.run(\"start\", 
self.last_options)\n\n def tick(self):\n if self.should_reload.is_set():\n self.should_reload.clear()\n ctx.log.info(\"Reloading script: %s\" % self.name)\n self.ns = load_script(self.path, self.args)\n self.start(self.last_options)\n self.configure(self.last_options, self.last_options.keys())\n else:\n self.run(\"tick\")\n\n def start(self, opts):\n self.last_options = opts\n self.load_script()\n\n def configure(self, options, updated):\n self.last_options = options\n if not self.observer:\n self.observer = polling.PollingObserver()\n # Bind the handler to the real underlying master object\n self.observer.schedule(\n ReloadHandler(self.reload),\n os.path.dirname(self.path) or \".\"\n )\n self.observer.start()\n self.run(\"configure\", options, updated)\n\n def done(self):\n self.run(\"done\")\n self.dead = True\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n\n def running(self):\n self.is_running = True\n\n def run_once(self, command, flows):\n try:\n sc = Script(command)\n except ValueError as e:\n raise ValueError(str(e))\n sc.load_script()\n for f in flows:\n for evt, o in eventsequence.iterate(f):\n sc.run(evt, o)\n sc.done()\n return sc\n\n def configure(self, options, updated):\n if \"scripts\" in updated:\n for s in options.scripts:\n if options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script: %s\" % s)\n\n for a in ctx.master.addons.chain[:]:\n if isinstance(a, Script) and a.name not in options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are listed directly after the script addon. This is\n # needed to ensure that interactions with, for instance, flow\n # serialization remains correct.\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it should simply\n # be moved.\n\n current = {}\n for a in ctx.master.addons.chain[:]:\n if isinstance(a, Script):\n current[a.name] = a\n ctx.master.addons.chain.remove(a)\n\n ordered = []\n newscripts = []\n for s in options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n ctx.log.info(\"Loading script: %s\" % s)\n try:\n sc = Script(s)\n except ValueError as e:\n raise exceptions.OptionsError(str(e))\n ordered.append(sc)\n newscripts.append(sc)\n\n ochain = ctx.master.addons.chain\n pos = ochain.index(self)\n ctx.master.addons.chain = ochain[:pos + 1] + ordered + ochain[pos + 1:]\n\n for s in newscripts:\n ctx.master.addons.invoke_addon(s, \"start\", options)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(\n s, \"configure\", options, options.keys()\n )\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}]} | 2,957 | 232 |
gh_patches_debug_43858 | rasdani/github-patches | git_diff | litestar-org__litestar-3293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: route not recognized in schemas for specific types
### Description
Currently some parameter types don't show in the docs. It seems `int` works fine, while `str` and `uuid` don't.
This is silent: no logs are shown, even when running in `debug` mode. Same behavior for both `/schema/elements` and `/schema/swagger`.
### URL to code causing the issue
_No response_
### MCVE
```python
# (Uncomment one at a time)
class BugRoutes(Controller):
tags = ["Bugs"]
path = "/bugs"
dependencies = dict(context=Provide(route_context))
@routes.get()
def get_bugs(self, context: AppContext) -> Response:
return Response({})
@routes.post()
def create_bug(self, context: AppContext, data: Any) -> Response:
return Response({})
# This works
# @routes.get("/{param:int}")
# def get_bug(self, context: AppContext, param: int) -> Response:
# return Response({})
# This doesn't work (not showing on docs)
# @routes.get("/{param:str}")
# def get_bug_str(self, context: AppContext, param: str) -> Response:
# return Response({})
# This doesn't work (not showing on docs)
# @routes.get("/{param:uuid}")
# def get_bug_uuid(self, context: AppContext, param: UUID) -> Response:
# return Response({})
@routes.patch("/{param:int}")
def update_bug(self, context: AppContext, param: int) -> Response:
return Response({})
@routes.delete("/{param:int}")
def delete_bug(self, context: AppContext, param: int) -> None:
return Response({})
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.3.2
### Platform
- [X] Mac
- [ ] Linux
- [ ] Windows
- [ ] Other (Please specify in the description above)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/_openapi/path_item.py`
Content:
```
1 from __future__ import annotations
2
3 from inspect import cleandoc
4 from typing import TYPE_CHECKING
5
6 from litestar._openapi.parameters import create_parameters_for_handler
7 from litestar._openapi.request_body import create_request_body
8 from litestar._openapi.responses import create_responses_for_handler
9 from litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN
10 from litestar.enums import HttpMethod
11 from litestar.openapi.spec import Operation, PathItem
12 from litestar.utils.helpers import unwrap_partial
13
14 if TYPE_CHECKING:
15 from litestar._openapi.datastructures import OpenAPIContext
16 from litestar.handlers.http_handlers import HTTPRouteHandler
17 from litestar.routes import HTTPRoute
18
19 __all__ = ("create_path_item_for_route",)
20
21
22 class PathItemFactory:
23 """Factory for creating a PathItem instance for a given route."""
24
25 def __init__(self, openapi_context: OpenAPIContext, route: HTTPRoute) -> None:
26 self.context = openapi_context
27 self.route = route
28 self._path_item = PathItem()
29
30 def create_path_item(self) -> PathItem:
31 """Create a PathItem for the given route parsing all http_methods into Operation Models.
32
33 Returns:
34 A PathItem instance.
35 """
36 for http_method, handler_tuple in self.route.route_handler_map.items():
37 route_handler, _ = handler_tuple
38
39 if not route_handler.resolve_include_in_schema():
40 continue
41
42 operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))
43
44 setattr(self._path_item, http_method.lower(), operation)
45
46 return self._path_item
47
48 def create_operation_for_handler_method(
49 self, route_handler: HTTPRouteHandler, http_method: HttpMethod
50 ) -> Operation:
51 """Create an Operation instance for a given route handler and http method.
52
53 Args:
54 route_handler: A route handler instance.
55 http_method: An HttpMethod enum value.
56
57 Returns:
58 An Operation instance.
59 """
60 operation_id = self.create_operation_id(route_handler, http_method)
61 parameters = create_parameters_for_handler(self.context, route_handler, self.route.path_parameters)
62 signature_fields = route_handler.parsed_fn_signature.parameters
63
64 request_body = None
65 if data_field := signature_fields.get("data"):
66 request_body = create_request_body(
67 self.context, route_handler.handler_id, route_handler.resolve_data_dto(), data_field
68 )
69
70 raises_validation_error = bool(data_field or self._path_item.parameters or parameters)
71 responses = create_responses_for_handler(
72 self.context, route_handler, raises_validation_error=raises_validation_error
73 )
74
75 return route_handler.operation_class(
76 operation_id=operation_id,
77 tags=route_handler.resolve_tags() or None,
78 summary=route_handler.summary or SEPARATORS_CLEANUP_PATTERN.sub("", route_handler.handler_name.title()),
79 description=self.create_description_for_handler(route_handler),
80 deprecated=route_handler.deprecated,
81 responses=responses,
82 request_body=request_body,
83 parameters=parameters or None, # type: ignore[arg-type]
84 security=route_handler.resolve_security() or None,
85 )
86
87 def create_operation_id(self, route_handler: HTTPRouteHandler, http_method: HttpMethod) -> str:
88 """Create an operation id for a given route handler and http method.
89
90 Adds the operation id to the context's operation id set, where it is checked for uniqueness.
91
92 Args:
93 route_handler: A route handler instance.
94 http_method: An HttpMethod enum value.
95
96 Returns:
97 An operation id string.
98 """
99 if isinstance(route_handler.operation_id, str):
100 operation_id = route_handler.operation_id
101 elif callable(route_handler.operation_id):
102 operation_id = route_handler.operation_id(route_handler, http_method, self.route.path_components)
103 else:
104 operation_id = self.context.openapi_config.operation_id_creator(
105 route_handler, http_method, self.route.path_components
106 )
107 self.context.add_operation_id(operation_id)
108 return operation_id
109
110 def create_description_for_handler(self, route_handler: HTTPRouteHandler) -> str | None:
111 """Produce the operation description for a route handler.
112
113 Args:
114 route_handler: A route handler instance.
115
116 Returns:
117 An optional description string
118 """
119 handler_description = route_handler.description
120 if handler_description is None and self.context.openapi_config.use_handler_docstrings:
121 fn = unwrap_partial(route_handler.fn)
122 return cleandoc(fn.__doc__) if fn.__doc__ else None
123 return handler_description
124
125
126 def create_path_item_for_route(openapi_context: OpenAPIContext, route: HTTPRoute) -> PathItem:
127 """Create a PathItem for the given route parsing all http_methods into Operation Models.
128
129 Args:
130 openapi_context: The OpenAPIContext instance.
131 route: The route to create a PathItem for.
132
133 Returns:
134 A PathItem instance.
135 """
136 path_item_factory = PathItemFactory(openapi_context, route)
137 return path_item_factory.create_path_item()
138
```
Path: `litestar/_openapi/plugin.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from litestar._openapi.datastructures import OpenAPIContext
6 from litestar._openapi.path_item import create_path_item_for_route
7 from litestar.exceptions import ImproperlyConfiguredException
8 from litestar.plugins import InitPluginProtocol
9 from litestar.plugins.base import ReceiveRoutePlugin
10 from litestar.routes import HTTPRoute
11
12 if TYPE_CHECKING:
13 from litestar.app import Litestar
14 from litestar.config.app import AppConfig
15 from litestar.openapi.config import OpenAPIConfig
16 from litestar.openapi.spec import OpenAPI
17 from litestar.routes import BaseRoute
18
19
20 class OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):
21 __slots__ = (
22 "app",
23 "included_routes",
24 "_openapi_config",
25 "_openapi_schema",
26 )
27
28 def __init__(self, app: Litestar) -> None:
29 self.app = app
30 self.included_routes: dict[str, HTTPRoute] = {}
31 self._openapi_config: OpenAPIConfig | None = None
32 self._openapi_schema: OpenAPI | None = None
33
34 def _build_openapi_schema(self) -> OpenAPI:
35 openapi_config = self.openapi_config
36
37 if openapi_config.create_examples:
38 from litestar._openapi.schema_generation.examples import ExampleFactory
39
40 ExampleFactory.seed_random(openapi_config.random_seed)
41
42 openapi = openapi_config.to_openapi_schema()
43 context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)
44 openapi.paths = {
45 route.path_format or "/": create_path_item_for_route(context, route)
46 for route in self.included_routes.values()
47 }
48 openapi.components.schemas = context.schema_registry.generate_components_schemas()
49 return openapi
50
51 def provide_openapi(self) -> OpenAPI:
52 if not self._openapi_schema:
53 self._openapi_schema = self._build_openapi_schema()
54 return self._openapi_schema
55
56 def on_app_init(self, app_config: AppConfig) -> AppConfig:
57 if app_config.openapi_config:
58 self._openapi_config = app_config.openapi_config
59 app_config.route_handlers.append(self.openapi_config.openapi_controller)
60 return app_config
61
62 @property
63 def openapi_config(self) -> OpenAPIConfig:
64 if not self._openapi_config:
65 raise ImproperlyConfiguredException("OpenAPIConfig not initialized")
66 return self._openapi_config
67
68 def receive_route(self, route: BaseRoute) -> None:
69 if not isinstance(route, HTTPRoute):
70 return
71
72 if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):
73 # Force recompute the schema if a new route is added
74 self._openapi_schema = None
75 self.included_routes[route.path] = route
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/_openapi/path_item.py b/litestar/_openapi/path_item.py
--- a/litestar/_openapi/path_item.py
+++ b/litestar/_openapi/path_item.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import dataclasses
from inspect import cleandoc
from typing import TYPE_CHECKING
@@ -8,6 +9,7 @@
from litestar._openapi.responses import create_responses_for_handler
from litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN
from litestar.enums import HttpMethod
+from litestar.exceptions import ImproperlyConfiguredException
from litestar.openapi.spec import Operation, PathItem
from litestar.utils.helpers import unwrap_partial
@@ -16,7 +18,7 @@
from litestar.handlers.http_handlers import HTTPRouteHandler
from litestar.routes import HTTPRoute
-__all__ = ("create_path_item_for_route",)
+__all__ = ("create_path_item_for_route", "merge_path_item_operations")
class PathItemFactory:
@@ -135,3 +137,32 @@
"""
path_item_factory = PathItemFactory(openapi_context, route)
return path_item_factory.create_path_item()
+
+
+def merge_path_item_operations(source: PathItem, other: PathItem, for_path: str) -> PathItem:
+ """Merge operations from path items, creating a new path item that includes
+ operations from both.
+ """
+ attrs_to_merge = {"get", "put", "post", "delete", "options", "head", "patch", "trace"}
+ fields = {f.name for f in dataclasses.fields(PathItem)} - attrs_to_merge
+ if any(getattr(source, attr) and getattr(other, attr) for attr in attrs_to_merge):
+ raise ValueError("Cannot merge operation for PathItem if operation is set on both items")
+
+ if differing_values := [
+ (value_a, value_b) for attr in fields if (value_a := getattr(source, attr)) != (value_b := getattr(other, attr))
+ ]:
+ raise ImproperlyConfiguredException(
+ f"Conflicting OpenAPI path configuration for {for_path!r}. "
+ f"{', '.join(f'{a} != {b}' for a, b in differing_values)}"
+ )
+
+ return dataclasses.replace(
+ source,
+ get=source.get or other.get,
+ post=source.post or other.post,
+ patch=source.patch or other.patch,
+ put=source.put or other.put,
+ delete=source.delete or other.delete,
+ options=source.options or other.options,
+ trace=source.trace or other.trace,
+ )
diff --git a/litestar/_openapi/plugin.py b/litestar/_openapi/plugin.py
--- a/litestar/_openapi/plugin.py
+++ b/litestar/_openapi/plugin.py
@@ -3,7 +3,7 @@
from typing import TYPE_CHECKING
from litestar._openapi.datastructures import OpenAPIContext
-from litestar._openapi.path_item import create_path_item_for_route
+from litestar._openapi.path_item import create_path_item_for_route, merge_path_item_operations
from litestar.exceptions import ImproperlyConfiguredException
from litestar.plugins import InitPluginProtocol
from litestar.plugins.base import ReceiveRoutePlugin
@@ -13,7 +13,7 @@
from litestar.app import Litestar
from litestar.config.app import AppConfig
from litestar.openapi.config import OpenAPIConfig
- from litestar.openapi.spec import OpenAPI
+ from litestar.openapi.spec import OpenAPI, PathItem
from litestar.routes import BaseRoute
@@ -41,10 +41,15 @@
openapi = openapi_config.to_openapi_schema()
context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)
- openapi.paths = {
- route.path_format or "/": create_path_item_for_route(context, route)
- for route in self.included_routes.values()
- }
+ path_items: dict[str, PathItem] = {}
+ for route in self.included_routes.values():
+ path = route.path_format or "/"
+ path_item = create_path_item_for_route(context, route)
+ if existing_path_item := path_items.get(path):
+ path_item = merge_path_item_operations(existing_path_item, path_item, for_path=path)
+ path_items[path] = path_item
+
+ openapi.paths = path_items
openapi.components.schemas = context.schema_registry.generate_components_schemas()
return openapi
| {"golden_diff": "diff --git a/litestar/_openapi/path_item.py b/litestar/_openapi/path_item.py\n--- a/litestar/_openapi/path_item.py\n+++ b/litestar/_openapi/path_item.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import dataclasses\n from inspect import cleandoc\n from typing import TYPE_CHECKING\n \n@@ -8,6 +9,7 @@\n from litestar._openapi.responses import create_responses_for_handler\n from litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN\n from litestar.enums import HttpMethod\n+from litestar.exceptions import ImproperlyConfiguredException\n from litestar.openapi.spec import Operation, PathItem\n from litestar.utils.helpers import unwrap_partial\n \n@@ -16,7 +18,7 @@\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.routes import HTTPRoute\n \n-__all__ = (\"create_path_item_for_route\",)\n+__all__ = (\"create_path_item_for_route\", \"merge_path_item_operations\")\n \n \n class PathItemFactory:\n@@ -135,3 +137,32 @@\n \"\"\"\n path_item_factory = PathItemFactory(openapi_context, route)\n return path_item_factory.create_path_item()\n+\n+\n+def merge_path_item_operations(source: PathItem, other: PathItem, for_path: str) -> PathItem:\n+ \"\"\"Merge operations from path items, creating a new path item that includes\n+ operations from both.\n+ \"\"\"\n+ attrs_to_merge = {\"get\", \"put\", \"post\", \"delete\", \"options\", \"head\", \"patch\", \"trace\"}\n+ fields = {f.name for f in dataclasses.fields(PathItem)} - attrs_to_merge\n+ if any(getattr(source, attr) and getattr(other, attr) for attr in attrs_to_merge):\n+ raise ValueError(\"Cannot merge operation for PathItem if operation is set on both items\")\n+\n+ if differing_values := [\n+ (value_a, value_b) for attr in fields if (value_a := getattr(source, attr)) != (value_b := getattr(other, attr))\n+ ]:\n+ raise ImproperlyConfiguredException(\n+ f\"Conflicting OpenAPI path configuration for {for_path!r}. 
\"\n+ f\"{', '.join(f'{a} != {b}' for a, b in differing_values)}\"\n+ )\n+\n+ return dataclasses.replace(\n+ source,\n+ get=source.get or other.get,\n+ post=source.post or other.post,\n+ patch=source.patch or other.patch,\n+ put=source.put or other.put,\n+ delete=source.delete or other.delete,\n+ options=source.options or other.options,\n+ trace=source.trace or other.trace,\n+ )\ndiff --git a/litestar/_openapi/plugin.py b/litestar/_openapi/plugin.py\n--- a/litestar/_openapi/plugin.py\n+++ b/litestar/_openapi/plugin.py\n@@ -3,7 +3,7 @@\n from typing import TYPE_CHECKING\n \n from litestar._openapi.datastructures import OpenAPIContext\n-from litestar._openapi.path_item import create_path_item_for_route\n+from litestar._openapi.path_item import create_path_item_for_route, merge_path_item_operations\n from litestar.exceptions import ImproperlyConfiguredException\n from litestar.plugins import InitPluginProtocol\n from litestar.plugins.base import ReceiveRoutePlugin\n@@ -13,7 +13,7 @@\n from litestar.app import Litestar\n from litestar.config.app import AppConfig\n from litestar.openapi.config import OpenAPIConfig\n- from litestar.openapi.spec import OpenAPI\n+ from litestar.openapi.spec import OpenAPI, PathItem\n from litestar.routes import BaseRoute\n \n \n@@ -41,10 +41,15 @@\n \n openapi = openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)\n- openapi.paths = {\n- route.path_format or \"/\": create_path_item_for_route(context, route)\n- for route in self.included_routes.values()\n- }\n+ path_items: dict[str, PathItem] = {}\n+ for route in self.included_routes.values():\n+ path = route.path_format or \"/\"\n+ path_item = create_path_item_for_route(context, route)\n+ if existing_path_item := path_items.get(path):\n+ path_item = merge_path_item_operations(existing_path_item, path_item, for_path=path)\n+ path_items[path] = path_item\n+\n+ openapi.paths = path_items\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n", "issue": "Bug: route not recognized in schemas for specific types\n### Description\r\n\r\nCurrently some parameters types don't show in the docs. It seems `int` works fine, while `str` and `uuid` don't.\r\nThis is silent, no logs are shown, running `debug` mode. 
Same behavior for both `/schema/elements` and `/schema/swagger`.\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\n(Uncomment one at a time)\r\n\r\n\r\nclass BugRoutes(Controller):\r\n tags = [\"Bugs\"]\r\n path = \"/bugs\"\r\n dependencies = dict(context=Provide(route_context))\r\n\r\n @routes.get()\r\n def get_bugs(self, context: AppContext) -> Response:\r\n return Response({})\r\n\r\n @routes.post()\r\n def create_bug(self, context: AppContext, data: Any) -> Response:\r\n return Response({})\r\n\r\n # This works\r\n # @routes.get(\"/{param:int}\")\r\n # def get_bug(self, context: AppContext, param: int) -> Response:\r\n # return Response({})\r\n\r\n # This doesn't work (not showing on docs)\r\n # @routes.get(\"/{param:str}\")\r\n # def get_bug_str(self, context: AppContext, param: str) -> Response:\r\n # return Response({})\r\n\r\n # This doesn't work (not showing on docs)\r\n # @routes.get(\"/{param:uuid}\")\r\n # def get_bug_uuid(self, context: AppContext, param: UUID) -> Response:\r\n # return Response({})\r\n\r\n @routes.patch(\"/{param:int}\")\r\n def update_bug(self, context: AppContext, param: int) -> Response:\r\n return Response({})\r\n\r\n @routes.delete(\"/{param:int}\")\r\n def delete_bug(self, context: AppContext, param: int) -> None:\r\n return Response({})\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.3.2\r\n\r\n### Platform\r\n\r\n- [X] Mac\r\n- [ ] Linux\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2700\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2700/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2700/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom inspect import cleandoc\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.parameters import create_parameters_for_handler\nfrom litestar._openapi.request_body import create_request_body\nfrom litestar._openapi.responses import create_responses_for_handler\nfrom litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN\nfrom litestar.enums import HttpMethod\nfrom litestar.openapi.spec import Operation, PathItem\nfrom litestar.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from litestar._openapi.datastructures import OpenAPIContext\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.routes import HTTPRoute\n\n__all__ = 
(\"create_path_item_for_route\",)\n\n\nclass PathItemFactory:\n \"\"\"Factory for creating a PathItem instance for a given route.\"\"\"\n\n def __init__(self, openapi_context: OpenAPIContext, route: HTTPRoute) -> None:\n self.context = openapi_context\n self.route = route\n self._path_item = PathItem()\n\n def create_path_item(self) -> PathItem:\n \"\"\"Create a PathItem for the given route parsing all http_methods into Operation Models.\n\n Returns:\n A PathItem instance.\n \"\"\"\n for http_method, handler_tuple in self.route.route_handler_map.items():\n route_handler, _ = handler_tuple\n\n if not route_handler.resolve_include_in_schema():\n continue\n\n operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))\n\n setattr(self._path_item, http_method.lower(), operation)\n\n return self._path_item\n\n def create_operation_for_handler_method(\n self, route_handler: HTTPRouteHandler, http_method: HttpMethod\n ) -> Operation:\n \"\"\"Create an Operation instance for a given route handler and http method.\n\n Args:\n route_handler: A route handler instance.\n http_method: An HttpMethod enum value.\n\n Returns:\n An Operation instance.\n \"\"\"\n operation_id = self.create_operation_id(route_handler, http_method)\n parameters = create_parameters_for_handler(self.context, route_handler, self.route.path_parameters)\n signature_fields = route_handler.parsed_fn_signature.parameters\n\n request_body = None\n if data_field := signature_fields.get(\"data\"):\n request_body = create_request_body(\n self.context, route_handler.handler_id, route_handler.resolve_data_dto(), data_field\n )\n\n raises_validation_error = bool(data_field or self._path_item.parameters or parameters)\n responses = create_responses_for_handler(\n self.context, route_handler, raises_validation_error=raises_validation_error\n )\n\n return route_handler.operation_class(\n operation_id=operation_id,\n tags=route_handler.resolve_tags() or None,\n summary=route_handler.summary or SEPARATORS_CLEANUP_PATTERN.sub(\"\", route_handler.handler_name.title()),\n description=self.create_description_for_handler(route_handler),\n deprecated=route_handler.deprecated,\n responses=responses,\n request_body=request_body,\n parameters=parameters or None, # type: ignore[arg-type]\n security=route_handler.resolve_security() or None,\n )\n\n def create_operation_id(self, route_handler: HTTPRouteHandler, http_method: HttpMethod) -> str:\n \"\"\"Create an operation id for a given route handler and http method.\n\n Adds the operation id to the context's operation id set, where it is checked for uniqueness.\n\n Args:\n route_handler: A route handler instance.\n http_method: An HttpMethod enum value.\n\n Returns:\n An operation id string.\n \"\"\"\n if isinstance(route_handler.operation_id, str):\n operation_id = route_handler.operation_id\n elif callable(route_handler.operation_id):\n operation_id = route_handler.operation_id(route_handler, http_method, self.route.path_components)\n else:\n operation_id = self.context.openapi_config.operation_id_creator(\n route_handler, http_method, self.route.path_components\n )\n self.context.add_operation_id(operation_id)\n return operation_id\n\n def create_description_for_handler(self, route_handler: HTTPRouteHandler) -> str | None:\n \"\"\"Produce the operation description for a route handler.\n\n Args:\n route_handler: A route handler instance.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None 
and self.context.openapi_config.use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef create_path_item_for_route(openapi_context: OpenAPIContext, route: HTTPRoute) -> PathItem:\n \"\"\"Create a PathItem for the given route parsing all http_methods into Operation Models.\n\n Args:\n openapi_context: The OpenAPIContext instance.\n route: The route to create a PathItem for.\n\n Returns:\n A PathItem instance.\n \"\"\"\n path_item_factory = PathItemFactory(openapi_context, route)\n return path_item_factory.create_path_item()\n", "path": "litestar/_openapi/path_item.py"}, {"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.datastructures import OpenAPIContext\nfrom litestar._openapi.path_item import create_path_item_for_route\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.plugins import InitPluginProtocol\nfrom litestar.plugins.base import ReceiveRoutePlugin\nfrom litestar.routes import HTTPRoute\n\nif TYPE_CHECKING:\n from litestar.app import Litestar\n from litestar.config.app import AppConfig\n from litestar.openapi.config import OpenAPIConfig\n from litestar.openapi.spec import OpenAPI\n from litestar.routes import BaseRoute\n\n\nclass OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):\n __slots__ = (\n \"app\",\n \"included_routes\",\n \"_openapi_config\",\n \"_openapi_schema\",\n )\n\n def __init__(self, app: Litestar) -> None:\n self.app = app\n self.included_routes: dict[str, HTTPRoute] = {}\n self._openapi_config: OpenAPIConfig | None = None\n self._openapi_schema: OpenAPI | None = None\n\n def _build_openapi_schema(self) -> OpenAPI:\n openapi_config = self.openapi_config\n\n if openapi_config.create_examples:\n from litestar._openapi.schema_generation.examples import ExampleFactory\n\n ExampleFactory.seed_random(openapi_config.random_seed)\n\n openapi = openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)\n openapi.paths = {\n route.path_format or \"/\": create_path_item_for_route(context, route)\n for route in self.included_routes.values()\n }\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n\n def provide_openapi(self) -> OpenAPI:\n if not self._openapi_schema:\n self._openapi_schema = self._build_openapi_schema()\n return self._openapi_schema\n\n def on_app_init(self, app_config: AppConfig) -> AppConfig:\n if app_config.openapi_config:\n self._openapi_config = app_config.openapi_config\n app_config.route_handlers.append(self.openapi_config.openapi_controller)\n return app_config\n\n @property\n def openapi_config(self) -> OpenAPIConfig:\n if not self._openapi_config:\n raise ImproperlyConfiguredException(\"OpenAPIConfig not initialized\")\n return self._openapi_config\n\n def receive_route(self, route: BaseRoute) -> None:\n if not isinstance(route, HTTPRoute):\n return\n\n if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):\n # Force recompute the schema if a new route is added\n self._openapi_schema = None\n self.included_routes[route.path] = route\n", "path": "litestar/_openapi/plugin.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom inspect import cleandoc\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.parameters import 
create_parameters_for_handler\nfrom litestar._openapi.request_body import create_request_body\nfrom litestar._openapi.responses import create_responses_for_handler\nfrom litestar._openapi.utils import SEPARATORS_CLEANUP_PATTERN\nfrom litestar.enums import HttpMethod\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.openapi.spec import Operation, PathItem\nfrom litestar.utils.helpers import unwrap_partial\n\nif TYPE_CHECKING:\n from litestar._openapi.datastructures import OpenAPIContext\n from litestar.handlers.http_handlers import HTTPRouteHandler\n from litestar.routes import HTTPRoute\n\n__all__ = (\"create_path_item_for_route\", \"merge_path_item_operations\")\n\n\nclass PathItemFactory:\n \"\"\"Factory for creating a PathItem instance for a given route.\"\"\"\n\n def __init__(self, openapi_context: OpenAPIContext, route: HTTPRoute) -> None:\n self.context = openapi_context\n self.route = route\n self._path_item = PathItem()\n\n def create_path_item(self) -> PathItem:\n \"\"\"Create a PathItem for the given route parsing all http_methods into Operation Models.\n\n Returns:\n A PathItem instance.\n \"\"\"\n for http_method, handler_tuple in self.route.route_handler_map.items():\n route_handler, _ = handler_tuple\n\n if not route_handler.resolve_include_in_schema():\n continue\n\n operation = self.create_operation_for_handler_method(route_handler, HttpMethod(http_method))\n\n setattr(self._path_item, http_method.lower(), operation)\n\n return self._path_item\n\n def create_operation_for_handler_method(\n self, route_handler: HTTPRouteHandler, http_method: HttpMethod\n ) -> Operation:\n \"\"\"Create an Operation instance for a given route handler and http method.\n\n Args:\n route_handler: A route handler instance.\n http_method: An HttpMethod enum value.\n\n Returns:\n An Operation instance.\n \"\"\"\n operation_id = self.create_operation_id(route_handler, http_method)\n parameters = create_parameters_for_handler(self.context, route_handler, self.route.path_parameters)\n signature_fields = route_handler.parsed_fn_signature.parameters\n\n request_body = None\n if data_field := signature_fields.get(\"data\"):\n request_body = create_request_body(\n self.context, route_handler.handler_id, route_handler.resolve_data_dto(), data_field\n )\n\n raises_validation_error = bool(data_field or self._path_item.parameters or parameters)\n responses = create_responses_for_handler(\n self.context, route_handler, raises_validation_error=raises_validation_error\n )\n\n return route_handler.operation_class(\n operation_id=operation_id,\n tags=route_handler.resolve_tags() or None,\n summary=route_handler.summary or SEPARATORS_CLEANUP_PATTERN.sub(\"\", route_handler.handler_name.title()),\n description=self.create_description_for_handler(route_handler),\n deprecated=route_handler.deprecated,\n responses=responses,\n request_body=request_body,\n parameters=parameters or None, # type: ignore[arg-type]\n security=route_handler.resolve_security() or None,\n )\n\n def create_operation_id(self, route_handler: HTTPRouteHandler, http_method: HttpMethod) -> str:\n \"\"\"Create an operation id for a given route handler and http method.\n\n Adds the operation id to the context's operation id set, where it is checked for uniqueness.\n\n Args:\n route_handler: A route handler instance.\n http_method: An HttpMethod enum value.\n\n Returns:\n An operation id string.\n \"\"\"\n if isinstance(route_handler.operation_id, str):\n operation_id = route_handler.operation_id\n elif 
callable(route_handler.operation_id):\n operation_id = route_handler.operation_id(route_handler, http_method, self.route.path_components)\n else:\n operation_id = self.context.openapi_config.operation_id_creator(\n route_handler, http_method, self.route.path_components\n )\n self.context.add_operation_id(operation_id)\n return operation_id\n\n def create_description_for_handler(self, route_handler: HTTPRouteHandler) -> str | None:\n \"\"\"Produce the operation description for a route handler.\n\n Args:\n route_handler: A route handler instance.\n\n Returns:\n An optional description string\n \"\"\"\n handler_description = route_handler.description\n if handler_description is None and self.context.openapi_config.use_handler_docstrings:\n fn = unwrap_partial(route_handler.fn)\n return cleandoc(fn.__doc__) if fn.__doc__ else None\n return handler_description\n\n\ndef create_path_item_for_route(openapi_context: OpenAPIContext, route: HTTPRoute) -> PathItem:\n \"\"\"Create a PathItem for the given route parsing all http_methods into Operation Models.\n\n Args:\n openapi_context: The OpenAPIContext instance.\n route: The route to create a PathItem for.\n\n Returns:\n A PathItem instance.\n \"\"\"\n path_item_factory = PathItemFactory(openapi_context, route)\n return path_item_factory.create_path_item()\n\n\ndef merge_path_item_operations(source: PathItem, other: PathItem, for_path: str) -> PathItem:\n \"\"\"Merge operations from path items, creating a new path item that includes\n operations from both.\n \"\"\"\n attrs_to_merge = {\"get\", \"put\", \"post\", \"delete\", \"options\", \"head\", \"patch\", \"trace\"}\n fields = {f.name for f in dataclasses.fields(PathItem)} - attrs_to_merge\n if any(getattr(source, attr) and getattr(other, attr) for attr in attrs_to_merge):\n raise ValueError(\"Cannot merge operation for PathItem if operation is set on both items\")\n\n if differing_values := [\n (value_a, value_b) for attr in fields if (value_a := getattr(source, attr)) != (value_b := getattr(other, attr))\n ]:\n raise ImproperlyConfiguredException(\n f\"Conflicting OpenAPI path configuration for {for_path!r}. 
\"\n f\"{', '.join(f'{a} != {b}' for a, b in differing_values)}\"\n )\n\n return dataclasses.replace(\n source,\n get=source.get or other.get,\n post=source.post or other.post,\n patch=source.patch or other.patch,\n put=source.put or other.put,\n delete=source.delete or other.delete,\n options=source.options or other.options,\n trace=source.trace or other.trace,\n )\n", "path": "litestar/_openapi/path_item.py"}, {"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.datastructures import OpenAPIContext\nfrom litestar._openapi.path_item import create_path_item_for_route, merge_path_item_operations\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.plugins import InitPluginProtocol\nfrom litestar.plugins.base import ReceiveRoutePlugin\nfrom litestar.routes import HTTPRoute\n\nif TYPE_CHECKING:\n from litestar.app import Litestar\n from litestar.config.app import AppConfig\n from litestar.openapi.config import OpenAPIConfig\n from litestar.openapi.spec import OpenAPI, PathItem\n from litestar.routes import BaseRoute\n\n\nclass OpenAPIPlugin(InitPluginProtocol, ReceiveRoutePlugin):\n __slots__ = (\n \"app\",\n \"included_routes\",\n \"_openapi_config\",\n \"_openapi_schema\",\n )\n\n def __init__(self, app: Litestar) -> None:\n self.app = app\n self.included_routes: dict[str, HTTPRoute] = {}\n self._openapi_config: OpenAPIConfig | None = None\n self._openapi_schema: OpenAPI | None = None\n\n def _build_openapi_schema(self) -> OpenAPI:\n openapi_config = self.openapi_config\n\n if openapi_config.create_examples:\n from litestar._openapi.schema_generation.examples import ExampleFactory\n\n ExampleFactory.seed_random(openapi_config.random_seed)\n\n openapi = openapi_config.to_openapi_schema()\n context = OpenAPIContext(openapi_config=openapi_config, plugins=self.app.plugins.openapi)\n path_items: dict[str, PathItem] = {}\n for route in self.included_routes.values():\n path = route.path_format or \"/\"\n path_item = create_path_item_for_route(context, route)\n if existing_path_item := path_items.get(path):\n path_item = merge_path_item_operations(existing_path_item, path_item, for_path=path)\n path_items[path] = path_item\n\n openapi.paths = path_items\n openapi.components.schemas = context.schema_registry.generate_components_schemas()\n return openapi\n\n def provide_openapi(self) -> OpenAPI:\n if not self._openapi_schema:\n self._openapi_schema = self._build_openapi_schema()\n return self._openapi_schema\n\n def on_app_init(self, app_config: AppConfig) -> AppConfig:\n if app_config.openapi_config:\n self._openapi_config = app_config.openapi_config\n app_config.route_handlers.append(self.openapi_config.openapi_controller)\n return app_config\n\n @property\n def openapi_config(self) -> OpenAPIConfig:\n if not self._openapi_config:\n raise ImproperlyConfiguredException(\"OpenAPIConfig not initialized\")\n return self._openapi_config\n\n def receive_route(self, route: BaseRoute) -> None:\n if not isinstance(route, HTTPRoute):\n return\n\n if any(route_handler.resolve_include_in_schema() for route_handler, _ in route.route_handler_map.values()):\n # Force recompute the schema if a new route is added\n self._openapi_schema = None\n self.included_routes[route.path] = route\n", "path": "litestar/_openapi/plugin.py"}]} | 3,144 | 1,022 |
gh_patches_debug_21633 | rasdani/github-patches | git_diff | PyGithub__PyGithub-2439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
v1.58.0 TypeError: create_jwt() got an unexpected keyword argument 'expiration'
The `expiration` keyword argument was removed in v1.58.0. The interface defined in GithubIntegration.pyi is no longer accurate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/GithubIntegration.py`
Content:
```
1 import time
2
3 import deprecated
4 import jwt
5
6 from github import Consts
7 from github.GithubException import GithubException
8 from github.Installation import Installation
9 from github.InstallationAuthorization import InstallationAuthorization
10 from github.PaginatedList import PaginatedList
11 from github.Requester import Requester
12
13
14 class GithubIntegration:
15 """
16 Main class to obtain tokens for a GitHub integration.
17 """
18
19 def __init__(
20 self,
21 integration_id,
22 private_key,
23 base_url=Consts.DEFAULT_BASE_URL,
24 jwt_expiry=Consts.DEFAULT_JWT_EXPIRY,
25 jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT,
26 ):
27 """
28 :param integration_id: int
29 :param private_key: string
30 :param base_url: string
31 :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration.
32 The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation
33 https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt
34 :param jwt_issued_at: int. Number of seconds, relative to now, to set for the "iat" (issued at) parameter.
35 The default value is -60 to protect against clock drift
36 """
37 assert isinstance(integration_id, (int, str)), integration_id
38 assert isinstance(private_key, str), "supplied private key should be a string"
39 assert isinstance(base_url, str), base_url
40 assert isinstance(jwt_expiry, int), jwt_expiry
41 assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry
42 assert isinstance(jwt_issued_at, int)
43
44 self.base_url = base_url
45 self.integration_id = integration_id
46 self.private_key = private_key
47 self.jwt_expiry = jwt_expiry
48 self.jwt_issued_at = jwt_issued_at
49 self.__requester = Requester(
50 login_or_token=None,
51 password=None,
52 jwt=self.create_jwt(),
53 app_auth=None,
54 base_url=self.base_url,
55 timeout=Consts.DEFAULT_TIMEOUT,
56 user_agent="PyGithub/Python",
57 per_page=Consts.DEFAULT_PER_PAGE,
58 verify=True,
59 retry=None,
60 pool_size=None,
61 )
62
63 def _get_headers(self):
64 """
65 Get headers for the requests.
66
67 :return: dict
68 """
69 return {
70 "Authorization": f"Bearer {self.create_jwt()}",
71 "Accept": Consts.mediaTypeIntegrationPreview,
72 "User-Agent": "PyGithub/Python",
73 }
74
75 def _get_installed_app(self, url):
76 """
77 Get installation for the given URL.
78
79 :param url: str
80 :rtype: :class:`github.Installation.Installation`
81 """
82 headers, response = self.__requester.requestJsonAndCheck(
83 "GET", url, headers=self._get_headers()
84 )
85
86 return Installation(
87 requester=self.__requester,
88 headers=headers,
89 attributes=response,
90 completed=True,
91 )
92
93 def create_jwt(self):
94 """
95 Create a signed JWT
96 https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app
97
98 :return string:
99 """
100 now = int(time.time())
101 payload = {
102 "iat": now + self.jwt_issued_at,
103 "exp": now + self.jwt_expiry,
104 "iss": self.integration_id,
105 }
106 encrypted = jwt.encode(payload, key=self.private_key, algorithm="RS256")
107
108 if isinstance(encrypted, bytes):
109 encrypted = encrypted.decode("utf-8")
110
111 return encrypted
112
113 def get_access_token(self, installation_id, permissions=None):
114 """
115 :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>`
116 :param installation_id: int
117 :param permissions: dict
118 :return: :class:`github.InstallationAuthorization.InstallationAuthorization`
119 """
120 if permissions is None:
121 permissions = {}
122
123 if not isinstance(permissions, dict):
124 raise GithubException(
125 status=400, data={"message": "Invalid permissions"}, headers=None
126 )
127
128 body = {"permissions": permissions}
129 headers, response = self.__requester.requestJsonAndCheck(
130 "POST",
131 f"/app/installations/{installation_id}/access_tokens",
132 input=body,
133 )
134
135 return InstallationAuthorization(
136 requester=self.__requester,
137 headers=headers,
138 attributes=response,
139 completed=True,
140 )
141
142 @deprecated.deprecated("Use get_repo_installation")
143 def get_installation(self, owner, repo):
144 """
145 Deprecated by get_repo_installation
146
147 :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`
148 :param owner: str
149 :param repo: str
150 :rtype: :class:`github.Installation.Installation`
151 """
152 return self._get_installed_app(url=f"/repos/{owner}/{repo}/installation")
153
154 def get_installations(self):
155 """
156 :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app>
157 :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]`
158 """
159 return PaginatedList(
160 contentClass=Installation,
161 requester=self.__requester,
162 firstUrl="/app/installations",
163 firstParams=None,
164 headers=self._get_headers(),
165 list_item="installations",
166 )
167
168 def get_org_installation(self, org):
169 """
170 :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>`
171 :param org: str
172 :rtype: :class:`github.Installation.Installation`
173 """
174 return self._get_installed_app(url=f"/orgs/{org}/installation")
175
176 def get_repo_installation(self, owner, repo):
177 """
178 :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`
179 :param owner: str
180 :param repo: str
181 :rtype: :class:`github.Installation.Installation`
182 """
183 return self._get_installed_app(url=f"/repos/{owner}/{repo}/installation")
184
185 def get_user_installation(self, username):
186 """
187 :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>`
188 :param username: str
189 :rtype: :class:`github.Installation.Installation`
190 """
191 return self._get_installed_app(url=f"/users/{username}/installation")
192
193 def get_app_installation(self, installation_id):
194 """
195 :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>`
196 :param installation_id: int
197 :rtype: :class:`github.Installation.Installation`
198 """
199 return self._get_installed_app(url=f"/app/installations/{installation_id}")
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/github/GithubIntegration.py b/github/GithubIntegration.py
--- a/github/GithubIntegration.py
+++ b/github/GithubIntegration.py
@@ -90,17 +90,23 @@
completed=True,
)
- def create_jwt(self):
+ def create_jwt(self, expiration=None):
"""
Create a signed JWT
https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app
:return string:
"""
+ if expiration is not None:
+ assert isinstance(expiration, int), expiration
+ assert (
+ Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY
+ ), expiration
+
now = int(time.time())
payload = {
"iat": now + self.jwt_issued_at,
- "exp": now + self.jwt_expiry,
+ "exp": now + (expiration if expiration is not None else self.jwt_expiry),
"iss": self.integration_id,
}
encrypted = jwt.encode(payload, key=self.private_key, algorithm="RS256")
| {"golden_diff": "diff --git a/github/GithubIntegration.py b/github/GithubIntegration.py\n--- a/github/GithubIntegration.py\n+++ b/github/GithubIntegration.py\n@@ -90,17 +90,23 @@\n completed=True,\n )\n \n- def create_jwt(self):\n+ def create_jwt(self, expiration=None):\n \"\"\"\n Create a signed JWT\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app\n \n :return string:\n \"\"\"\n+ if expiration is not None:\n+ assert isinstance(expiration, int), expiration\n+ assert (\n+ Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY\n+ ), expiration\n+\n now = int(time.time())\n payload = {\n \"iat\": now + self.jwt_issued_at,\n- \"exp\": now + self.jwt_expiry,\n+ \"exp\": now + (expiration if expiration is not None else self.jwt_expiry),\n \"iss\": self.integration_id,\n }\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\n", "issue": "v1.58.0 TypeError: create_jwt() got an unexpected keyword argument 'expiration'\nThe `expiration` keyword argument was removed in v1.58.0. The interface defined in GithubIntegration.pyi is no longer accurate. \n", "before_files": [{"content": "import time\n\nimport deprecated\nimport jwt\n\nfrom github import Consts\nfrom github.GithubException import GithubException\nfrom github.Installation import Installation\nfrom github.InstallationAuthorization import InstallationAuthorization\nfrom github.PaginatedList import PaginatedList\nfrom github.Requester import Requester\n\n\nclass GithubIntegration:\n \"\"\"\n Main class to obtain tokens for a GitHub integration.\n \"\"\"\n\n def __init__(\n self,\n integration_id,\n private_key,\n base_url=Consts.DEFAULT_BASE_URL,\n jwt_expiry=Consts.DEFAULT_JWT_EXPIRY,\n jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT,\n ):\n \"\"\"\n :param integration_id: int\n :param private_key: string\n :param base_url: string\n :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration.\n The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt\n :param jwt_issued_at: int. 
Number of seconds, relative to now, to set for the \"iat\" (issued at) parameter.\n The default value is -60 to protect against clock drift\n \"\"\"\n assert isinstance(integration_id, (int, str)), integration_id\n assert isinstance(private_key, str), \"supplied private key should be a string\"\n assert isinstance(base_url, str), base_url\n assert isinstance(jwt_expiry, int), jwt_expiry\n assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry\n assert isinstance(jwt_issued_at, int)\n\n self.base_url = base_url\n self.integration_id = integration_id\n self.private_key = private_key\n self.jwt_expiry = jwt_expiry\n self.jwt_issued_at = jwt_issued_at\n self.__requester = Requester(\n login_or_token=None,\n password=None,\n jwt=self.create_jwt(),\n app_auth=None,\n base_url=self.base_url,\n timeout=Consts.DEFAULT_TIMEOUT,\n user_agent=\"PyGithub/Python\",\n per_page=Consts.DEFAULT_PER_PAGE,\n verify=True,\n retry=None,\n pool_size=None,\n )\n\n def _get_headers(self):\n \"\"\"\n Get headers for the requests.\n\n :return: dict\n \"\"\"\n return {\n \"Authorization\": f\"Bearer {self.create_jwt()}\",\n \"Accept\": Consts.mediaTypeIntegrationPreview,\n \"User-Agent\": \"PyGithub/Python\",\n }\n\n def _get_installed_app(self, url):\n \"\"\"\n Get installation for the given URL.\n\n :param url: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n headers, response = self.__requester.requestJsonAndCheck(\n \"GET\", url, headers=self._get_headers()\n )\n\n return Installation(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n def create_jwt(self):\n \"\"\"\n Create a signed JWT\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app\n\n :return string:\n \"\"\"\n now = int(time.time())\n payload = {\n \"iat\": now + self.jwt_issued_at,\n \"exp\": now + self.jwt_expiry,\n \"iss\": self.integration_id,\n }\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\n\n if isinstance(encrypted, bytes):\n encrypted = encrypted.decode(\"utf-8\")\n\n return encrypted\n\n def get_access_token(self, installation_id, permissions=None):\n \"\"\"\n :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>`\n :param installation_id: int\n :param permissions: dict\n :return: :class:`github.InstallationAuthorization.InstallationAuthorization`\n \"\"\"\n if permissions is None:\n permissions = {}\n\n if not isinstance(permissions, dict):\n raise GithubException(\n status=400, data={\"message\": \"Invalid permissions\"}, headers=None\n )\n\n body = {\"permissions\": permissions}\n headers, response = self.__requester.requestJsonAndCheck(\n \"POST\",\n f\"/app/installations/{installation_id}/access_tokens\",\n input=body,\n )\n\n return InstallationAuthorization(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n @deprecated.deprecated(\"Use get_repo_installation\")\n def get_installation(self, owner, repo):\n \"\"\"\n Deprecated by get_repo_installation\n\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def 
get_installations(self):\n \"\"\"\n :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app>\n :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]`\n \"\"\"\n return PaginatedList(\n contentClass=Installation,\n requester=self.__requester,\n firstUrl=\"/app/installations\",\n firstParams=None,\n headers=self._get_headers(),\n list_item=\"installations\",\n )\n\n def get_org_installation(self, org):\n \"\"\"\n :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>`\n :param org: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/orgs/{org}/installation\")\n\n def get_repo_installation(self, owner, repo):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_user_installation(self, username):\n \"\"\"\n :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>`\n :param username: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/users/{username}/installation\")\n\n def get_app_installation(self, installation_id):\n \"\"\"\n :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>`\n :param installation_id: int\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/app/installations/{installation_id}\")\n", "path": "github/GithubIntegration.py"}], "after_files": [{"content": "import time\n\nimport deprecated\nimport jwt\n\nfrom github import Consts\nfrom github.GithubException import GithubException\nfrom github.Installation import Installation\nfrom github.InstallationAuthorization import InstallationAuthorization\nfrom github.PaginatedList import PaginatedList\nfrom github.Requester import Requester\n\n\nclass GithubIntegration:\n \"\"\"\n Main class to obtain tokens for a GitHub integration.\n \"\"\"\n\n def __init__(\n self,\n integration_id,\n private_key,\n base_url=Consts.DEFAULT_BASE_URL,\n jwt_expiry=Consts.DEFAULT_JWT_EXPIRY,\n jwt_issued_at=Consts.DEFAULT_JWT_ISSUED_AT,\n ):\n \"\"\"\n :param integration_id: int\n :param private_key: string\n :param base_url: string\n :param jwt_expiry: int. Expiry of the JWT used to get the information about this integration.\n The default expiration is in 5 minutes and is capped at 10 minutes according to GitHub documentation\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt\n :param jwt_issued_at: int. 
Number of seconds, relative to now, to set for the \"iat\" (issued at) parameter.\n The default value is -60 to protect against clock drift\n \"\"\"\n assert isinstance(integration_id, (int, str)), integration_id\n assert isinstance(private_key, str), \"supplied private key should be a string\"\n assert isinstance(base_url, str), base_url\n assert isinstance(jwt_expiry, int), jwt_expiry\n assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry\n assert isinstance(jwt_issued_at, int)\n\n self.base_url = base_url\n self.integration_id = integration_id\n self.private_key = private_key\n self.jwt_expiry = jwt_expiry\n self.jwt_issued_at = jwt_issued_at\n self.__requester = Requester(\n login_or_token=None,\n password=None,\n jwt=self.create_jwt(),\n app_auth=None,\n base_url=self.base_url,\n timeout=Consts.DEFAULT_TIMEOUT,\n user_agent=\"PyGithub/Python\",\n per_page=Consts.DEFAULT_PER_PAGE,\n verify=True,\n retry=None,\n pool_size=None,\n )\n\n def _get_headers(self):\n \"\"\"\n Get headers for the requests.\n\n :return: dict\n \"\"\"\n return {\n \"Authorization\": f\"Bearer {self.create_jwt()}\",\n \"Accept\": Consts.mediaTypeIntegrationPreview,\n \"User-Agent\": \"PyGithub/Python\",\n }\n\n def _get_installed_app(self, url):\n \"\"\"\n Get installation for the given URL.\n\n :param url: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n headers, response = self.__requester.requestJsonAndCheck(\n \"GET\", url, headers=self._get_headers()\n )\n\n return Installation(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n def create_jwt(self, expiration=None):\n \"\"\"\n Create a signed JWT\n https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app\n\n :return string:\n \"\"\"\n if expiration is not None:\n assert isinstance(expiration, int), expiration\n assert (\n Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY\n ), expiration\n\n now = int(time.time())\n payload = {\n \"iat\": now + self.jwt_issued_at,\n \"exp\": now + (expiration if expiration is not None else self.jwt_expiry),\n \"iss\": self.integration_id,\n }\n encrypted = jwt.encode(payload, key=self.private_key, algorithm=\"RS256\")\n\n if isinstance(encrypted, bytes):\n encrypted = encrypted.decode(\"utf-8\")\n\n return encrypted\n\n def get_access_token(self, installation_id, permissions=None):\n \"\"\"\n :calls: `POST /app/installations/{installation_id}/access_tokens <https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app>`\n :param installation_id: int\n :param permissions: dict\n :return: :class:`github.InstallationAuthorization.InstallationAuthorization`\n \"\"\"\n if permissions is None:\n permissions = {}\n\n if not isinstance(permissions, dict):\n raise GithubException(\n status=400, data={\"message\": \"Invalid permissions\"}, headers=None\n )\n\n body = {\"permissions\": permissions}\n headers, response = self.__requester.requestJsonAndCheck(\n \"POST\",\n f\"/app/installations/{installation_id}/access_tokens\",\n input=body,\n )\n\n return InstallationAuthorization(\n requester=self.__requester,\n headers=headers,\n attributes=response,\n completed=True,\n )\n\n @deprecated.deprecated(\"Use get_repo_installation\")\n def get_installation(self, owner, repo):\n \"\"\"\n Deprecated by get_repo_installation\n\n :calls: `GET /repos/{owner}/{repo}/installation 
<https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_installations(self):\n \"\"\"\n :calls: GET /app/installations <https://docs.github.com/en/rest/reference/apps#list-installations-for-the-authenticated-app>\n :rtype: :class:`github.PaginatedList.PaginatedList[github.Installation.Installation]`\n \"\"\"\n return PaginatedList(\n contentClass=Installation,\n requester=self.__requester,\n firstUrl=\"/app/installations\",\n firstParams=None,\n headers=self._get_headers(),\n list_item=\"installations\",\n )\n\n def get_org_installation(self, org):\n \"\"\"\n :calls: `GET /orgs/{org}/installation <https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app>`\n :param org: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/orgs/{org}/installation\")\n\n def get_repo_installation(self, owner, repo):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/installation <https://docs.github.com/en/rest/reference/apps#get-a-repository-installation-for-the-authenticated-app>`\n :param owner: str\n :param repo: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/repos/{owner}/{repo}/installation\")\n\n def get_user_installation(self, username):\n \"\"\"\n :calls: `GET /users/{username}/installation <https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app>`\n :param username: str\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/users/{username}/installation\")\n\n def get_app_installation(self, installation_id):\n \"\"\"\n :calls: `GET /app/installations/{installation_id} <https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app>`\n :param installation_id: int\n :rtype: :class:`github.Installation.Installation`\n \"\"\"\n return self._get_installed_app(url=f\"/app/installations/{installation_id}\")\n", "path": "github/GithubIntegration.py"}]} | 2,422 | 250 |
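The patch in the record above restores the optional `expiration` keyword on `GithubIntegration.create_jwt`, so callers written against the `GithubIntegration.pyi` stub work again. A minimal usage sketch follows, assuming PyGithub with this patch applied; the app id and key path are placeholders rather than real credentials.

```python
# Sketch only: assumes PyGithub with the patch above applied; the app id and the key
# path are placeholders, and the file must contain the GitHub App's RSA private key.
from github import GithubIntegration

APP_ID = 123456                                  # hypothetical GitHub App id
with open("github-app.private-key.pem") as fh:   # hypothetical key location
    PRIVATE_KEY = fh.read()

integration = GithubIntegration(APP_ID, PRIVATE_KEY)

# Uses the jwt_expiry configured on the instance (five minutes by default).
default_jwt = integration.create_jwt()

# The restored keyword overrides the expiry for a single token; the value is validated
# against Consts.MIN_JWT_EXPIRY and Consts.MAX_JWT_EXPIRY just like jwt_expiry.
short_lived_jwt = integration.create_jwt(expiration=120)
```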
gh_patches_debug_8845 | rasdani/github-patches | git_diff | safe-global__safe-config-service-14 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include provider info in the serialized response of `GET /safe-apps/`
The `/safe-apps` endpoint should include data about the provider, if any.
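In Django REST Framework this is typically done with a nested serializer. A minimal sketch, assuming a `Provider` model that exposes `url` and `name` fields (an assumption; the model itself is not shown in the files below):
```python
from rest_framework import serializers

from .models import Provider, SafeApp


class ProviderSerializer(serializers.ModelSerializer):
    class Meta:
        model = Provider
        fields = ['url', 'name']


class SafeAppsResponseSerializer(serializers.ModelSerializer):
    # Embeds the provider's fields in each serialized safe app.
    provider = ProviderSerializer()

    class Meta:
        model = SafeApp
        fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']
```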
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/safe_apps/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from .models import SafeApp
4
5
6 class SafeAppsResponseSerializer(serializers.ModelSerializer):
7 class Meta:
8 model = SafeApp
9 fields = ['url', 'name', 'icon_url', 'description', 'networks']
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/safe_apps/serializers.py b/src/safe_apps/serializers.py
--- a/src/safe_apps/serializers.py
+++ b/src/safe_apps/serializers.py
@@ -1,9 +1,17 @@
from rest_framework import serializers
-from .models import SafeApp
+from .models import SafeApp, Provider
+
+
+class ProviderSerializer(serializers.ModelSerializer):
+ class Meta:
+ model = Provider
+ fields = ['url', 'name']
class SafeAppsResponseSerializer(serializers.ModelSerializer):
+ provider = ProviderSerializer()
+
class Meta:
model = SafeApp
- fields = ['url', 'name', 'icon_url', 'description', 'networks']
+ fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']
| {"golden_diff": "diff --git a/src/safe_apps/serializers.py b/src/safe_apps/serializers.py\n--- a/src/safe_apps/serializers.py\n+++ b/src/safe_apps/serializers.py\n@@ -1,9 +1,17 @@\n from rest_framework import serializers\n \n-from .models import SafeApp\n+from .models import SafeApp, Provider\n+\n+\n+class ProviderSerializer(serializers.ModelSerializer):\n+ class Meta:\n+ model = Provider\n+ fields = ['url', 'name']\n \n \n class SafeAppsResponseSerializer(serializers.ModelSerializer):\n+ provider = ProviderSerializer()\n+\n class Meta:\n model = SafeApp\n- fields = ['url', 'name', 'icon_url', 'description', 'networks']\n+ fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']\n", "issue": "Include provider info in the serialized response of `GET /safe-apps/`\nThe `/safe-apps` endpoint should include data about the provider if any\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import SafeApp\n\n\nclass SafeAppsResponseSerializer(serializers.ModelSerializer):\n class Meta:\n model = SafeApp\n fields = ['url', 'name', 'icon_url', 'description', 'networks']\n", "path": "src/safe_apps/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom .models import SafeApp, Provider\n\n\nclass ProviderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Provider\n fields = ['url', 'name']\n\n\nclass SafeAppsResponseSerializer(serializers.ModelSerializer):\n provider = ProviderSerializer()\n\n class Meta:\n model = SafeApp\n fields = ['url', 'name', 'icon_url', 'description', 'networks', 'provider']\n", "path": "src/safe_apps/serializers.py"}]} | 359 | 180 |
gh_patches_debug_29629 | rasdani/github-patches | git_diff | aio-libs__aiohttp-4556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GET Requests to link-local IPv6 addresses don't work on Python 3.7+
🐞 **Describe the bug**
The aiohttp resolver loses information related to link-local IPv6 addresses on Python 3.7+ due to a change in the representation returned by `socket.getaddrinfo()`.
💡 **To Reproduce**
Try to get a URL like `http://[fe80::1%eth0]:8080/`; it will result in an OSError (Invalid argument) exception.
This seems to be due to the way that scope ids are handled in [resolver.py](https://github.com/aio-libs/aiohttp/blob/72c2acd4850b1cbc638b413a7c28d96882b4d7e8/aiohttp/resolver.py#L31-L37):
Run `socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]` on python 3.6:
```python
socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
('fe80::1%eth0', 8080, 0, 4)
```
Run it on python 3.7:
```python
>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]
('fe80::1', 8080, 0, 4)
```
The `address` element of the tuple no longer includes the textual representation of the scope id; it's only contained in the matching scope_id element of the tuple - which is then missing when later calling `_loop.create_connection()`.
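For reference, the textual scope can be recovered from the 3.7-style tuple with `socket.getnameinfo()`. A minimal sketch (it assumes a Linux host that actually has an `eth0` interface; otherwise `getaddrinfo()` itself fails):
```python
import socket

infos = socket.getaddrinfo('fe80::1%eth0', 8080,
                           family=socket.AF_INET6, proto=socket.IPPROTO_TCP)
family, _, proto, _, sockaddr = infos[0]

# On 3.7+ sockaddr is ('fe80::1', 8080, 0, <scope_id>); getnameinfo() folds the
# scope back into the host text so the address can be used for connecting.
host, port = socket.getnameinfo(
    sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
print(host, port)  # fe80::1%eth0 8080
```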
💡 **Expected behavior**
The URL is successfully retrieved for link-local IPv6 addresses.
📋 **Logs/tracebacks**
```python-traceback (paste your traceback in the next line)
N/A
```
📋 **Your version of the Python**
```console
$ python3 --version
Python 3.6.6
$ python3.7 --version
Python 3.7.5
```
📋 **Your version of the aiohttp/yarl/multidict distributions**
```console
$ python -m pip show aiohttp
python -m pip show aiohttp
Name: aiohttp
Version: 3.6.2
```
```console
$ python -m pip show multidict
Name: multidict
Version: 4.7.4
```
```console
$ python -m pip show yarl
Name: yarl
Version: 1.4.2
```
📋 **Additional context**
OS: Centos7 Linux
Proxy Server: No
Related to: client
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aiohttp/resolver.py`
Content:
```
1 import socket
2 from typing import Any, Dict, List
3
4 from .abc import AbstractResolver
5 from .helpers import get_running_loop
6
7 __all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')
8
9 try:
10 import aiodns
11
12 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
13 except ImportError: # pragma: no cover
14 aiodns = None
15
16 aiodns_default = False
17
18
19 class ThreadedResolver(AbstractResolver):
20 """Use Executor for synchronous getaddrinfo() calls, which defaults to
21 concurrent.futures.ThreadPoolExecutor.
22 """
23
24 def __init__(self) -> None:
25 self._loop = get_running_loop()
26
27 async def resolve(self, host: str, port: int=0,
28 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
29 infos = await self._loop.getaddrinfo(
30 host, port, type=socket.SOCK_STREAM, family=family)
31
32 hosts = []
33 for family, _, proto, _, address in infos:
34 hosts.append(
35 {'hostname': host,
36 'host': address[0], 'port': address[1],
37 'family': family, 'proto': proto,
38 'flags': socket.AI_NUMERICHOST})
39
40 return hosts
41
42 async def close(self) -> None:
43 pass
44
45
46 class AsyncResolver(AbstractResolver):
47 """Use the `aiodns` package to make asynchronous DNS lookups"""
48
49 def __init__(self, *args: Any, **kwargs: Any) -> None:
50 if aiodns is None:
51 raise RuntimeError("Resolver requires aiodns library")
52
53 self._loop = get_running_loop()
54 self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)
55
56 async def resolve(self, host: str, port: int=0,
57 family: int=socket.AF_INET) -> List[Dict[str, Any]]:
58 try:
59 resp = await self._resolver.gethostbyname(host, family)
60 except aiodns.error.DNSError as exc:
61 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
62 raise OSError(msg) from exc
63 hosts = []
64 for address in resp.addresses:
65 hosts.append(
66 {'hostname': host,
67 'host': address, 'port': port,
68 'family': family, 'proto': 0,
69 'flags': socket.AI_NUMERICHOST})
70
71 if not hosts:
72 raise OSError("DNS lookup failed")
73
74 return hosts
75
76 async def close(self) -> None:
77 return self._resolver.cancel()
78
79
80 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -31,11 +31,23 @@
hosts = []
for family, _, proto, _, address in infos:
- hosts.append(
- {'hostname': host,
- 'host': address[0], 'port': address[1],
- 'family': family, 'proto': proto,
- 'flags': socket.AI_NUMERICHOST})
+ if family == socket.AF_INET6 and address[3]: # type: ignore
+ # This is essential for link-local IPv6 addresses.
+ # LL IPv6 is a VERY rare case. Strictly speaking, we should use
+ # getnameinfo() unconditionally, but performance makes sense.
+ host, _port = socket.getnameinfo(
+ address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
+ port = int(_port)
+ else:
+ host, port = address[:2]
+ hosts.append({
+ 'hostname': host,
+ 'host': host,
+ 'port': port,
+ 'family': family,
+ 'proto': proto,
+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ })
return hosts
@@ -62,11 +74,14 @@
raise OSError(msg) from exc
hosts = []
for address in resp.addresses:
- hosts.append(
- {'hostname': host,
- 'host': address, 'port': port,
- 'family': family, 'proto': 0,
- 'flags': socket.AI_NUMERICHOST})
+ hosts.append({
+ 'hostname': host,
+ 'host': address,
+ 'port': port,
+ 'family': family,
+ 'proto': 0,
+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ })
if not hosts:
raise OSError("DNS lookup failed")
| {"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -31,11 +31,23 @@\n \n hosts = []\n for family, _, proto, _, address in infos:\n- hosts.append(\n- {'hostname': host,\n- 'host': address[0], 'port': address[1],\n- 'family': family, 'proto': proto,\n- 'flags': socket.AI_NUMERICHOST})\n+ if family == socket.AF_INET6 and address[3]: # type: ignore\n+ # This is essential for link-local IPv6 addresses.\n+ # LL IPv6 is a VERY rare case. Strictly speaking, we should use\n+ # getnameinfo() unconditionally, but performance makes sense.\n+ host, _port = socket.getnameinfo(\n+ address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)\n+ port = int(_port)\n+ else:\n+ host, port = address[:2]\n+ hosts.append({\n+ 'hostname': host,\n+ 'host': host,\n+ 'port': port,\n+ 'family': family,\n+ 'proto': proto,\n+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n+ })\n \n return hosts\n \n@@ -62,11 +74,14 @@\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n- hosts.append(\n- {'hostname': host,\n- 'host': address, 'port': port,\n- 'family': family, 'proto': 0,\n- 'flags': socket.AI_NUMERICHOST})\n+ hosts.append({\n+ 'hostname': host,\n+ 'host': address,\n+ 'port': port,\n+ 'family': family,\n+ 'proto': 0,\n+ 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n+ })\n \n if not hosts:\n raise OSError(\"DNS lookup failed\")\n", "issue": "GET Requests to link-local IPv6 addresses don't work on Python 3.7+\n\ud83d\udc1e **Describe the bug**\r\nThe aiohttp resolver loses information related to linklocal IPv6 addresses on Python 3.7+ due to a changes in the representation returned by `socket.getaddrinfo()`\r\n\r\n\ud83d\udca1 **To Reproduce**\r\nTry to get an URL like `http://[fe80::1%eth0]:8080/`, it will result in an OSError (Invalid argument) exception.\r\n\r\nThis seems to be due to the way that scopeid's are handled in [resolver.py](https://github.com/aio-libs/aiohttp/blob/72c2acd4850b1cbc638b413a7c28d96882b4d7e8/aiohttp/resolver.py#L31-L37):\r\n\r\nRun `socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]` on python 3.6:\r\n```python\r\nsocket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n('fe80::1%eth0', 8080, 0, 4)\r\n```\r\n\r\nRun it on python 3.7:\r\n```python\r\n>>> socket.getaddrinfo('fe80::1%eth0', 8080, family=socket.AF_INET6, proto=socket.IPPROTO_TCP)[0][4]\r\n('fe80::1', 8080, 0, 4)y\r\n```\r\n\r\nThe `address` element of the tuple no longer includes the textual representation of the scope id, it's only contained in the matching scope_id element of the tuple - which then is missing when later callings _loop.create_connection().\r\n\r\n\ud83d\udca1 **Expected behavior**\r\nThe URL is successfully retrieved for link local IPv6 addresses.\r\n\r\n\r\n\ud83d\udccb **Logs/tracebacks**\r\n```python-traceback (paste your traceback in the next line)\r\nN/A\r\n```\r\n\r\n\ud83d\udccb **Your version of the Python**\r\n```console\r\n$ python3 --version\r\nPython 3.6.6\r\n$ python3.7 --version\r\nPython 3.7.5\r\n```\r\n\r\n\ud83d\udccb **Your version of the aiohttp/yarl/multidict distributions**\r\n```console\r\n$ python -m pip show aiohttp\r\npython -m pip show aiohttp\r\nName: aiohttp\r\nVersion: 3.6.2\r\n```\r\n```console\r\n$ python -m pip show multidict\r\nName: multidict\r\nVersion: 
4.7.4\r\n```\r\n```console\r\n$ python -m pip show yarl\r\nName: yarl\r\nVersion: 1.4.2\r\n```\r\n\r\n\ud83d\udccb **Additional context**\r\nOS: Centos7 Linux\r\nProxy Server: No\r\nRelated to: client\n", "before_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n hosts.append(\n {'hostname': host,\n 'host': address[0], 'port': address[1],\n 'family': family, 'proto': proto,\n 'flags': socket.AI_NUMERICHOST})\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {'hostname': host,\n 'host': address, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}], "after_files": [{"content": "import socket\nfrom typing import Any, Dict, List\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self) -> None:\n self._loop = get_running_loop()\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append({\n 'hostname': host,\n 'host': host,\n 'port': port,\n 'family': family,\n 'proto': proto,\n 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n })\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop()\n self._resolver = aiodns.DNSResolver(*args, loop=self._loop, **kwargs)\n\n async def resolve(self, host: str, port: int=0,\n family: int=socket.AF_INET) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append({\n 'hostname': host,\n 'host': address,\n 'port': port,\n 'family': family,\n 'proto': 0,\n 'flags': socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n })\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]} | 1,683 | 458 |
gh_patches_debug_4277 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
send_photo file from disk doesn't seem to work on python2
### Steps to reproduce
1. https://github.com/python-telegram-bot/python-telegram-bot/wiki/Code-snippets#post-an-image-file-from-disk
2. I'm using the API and I'm getting this error:
```
'ascii' codec can't decode byte 0x89 in position 0: ordinal not in range(128)
2018-04-24 09:49:59,039 - telegram.ext.dispatcher - ERROR - An uncaught error was raised while processing the update
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\telegram\ext\dispatcher.py", line 279, in process_update
handler.handle_update(update, self)
File "C:\Python27\lib\site-packages\telegram\ext\commandhandler.py", line 173, in handle_update
return self.callback(dispatcher.bot, update, **optional_args)
File "bot_status.py", line 101, in graph_progress
bot.send_photo(chat_id, open(photo, 'rb'))
File "C:\Python27\lib\site-packages\telegram\bot.py", line 60, in decorator
result = func(self, *args, **kwargs)
File "C:\Python27\lib\site-packages\telegram\bot.py", line 85, in decorator
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "C:\Python27\lib\site-packages\telegram\utils\request.py", line 270, in post
'POST', url, body=data.to_form(), headers=data.headers, **urlopen_kwargs)
File "C:\Python27\lib\site-packages\telegram\files\inputfile.py", line 127, in to_form
return self._parse(form)
File "C:\Python27\lib\site-packages\telegram\files\inputfile.py", line 141, in _parse
return '\r\n'.join(form)
UnicodeDecodeError: 'ascii' codec can't decode byte 0x89 in position 0: ordinal not in range(128)
```
3.
### Expected behaviour
I was supposed to get an image
### Actual behaviour
The bot raised an exception
I've tested the same code on Python 3 and it works correctly; it seems to be a Python 2-only issue.
In the `_parse` function it seems that element `form[5]` is unicode, which forces Python to treat everything as unicode, and the PNG payload is not valid UTF-8 data.
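A short Python 2-only sketch of the failure mode (the form contents are illustrative, not the library's exact values):
```python
# -*- coding: utf-8 -*-
# Python 2 only: mixing a unicode element with raw PNG bytes in one join()
# forces an implicit ASCII decode of the binary payload.
form = [u'Content-Disposition: form-data; name="photo"; filename="graph.png"',
        '\x89PNG\r\n\x1a\n\x00\x00 ...']

try:
    '\r\n'.join(form)
except UnicodeDecodeError as exc:
    print(exc)  # 'ascii' codec can't decode byte 0x89 in position 0 ...

# Encoding the unicode pieces up front keeps the join purely byte-based, which
# is essentially what the patch below does for the filename:
joined = '\r\n'.join(item.encode('utf-8') if isinstance(item, unicode) else item
                     for item in form)
```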
### Configuration
Windows 10 x64 1803
**Version of Python, python-telegram-bot & dependencies:**
``$ python -m telegram``
```
python-telegram-bot 10.0.2
certifi 2018.04.16
future 0.16.0
Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:25:58) [MSC v.1500 64 bit (AMD64)]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/files/inputfile.py`
Content:
```
1 #!/usr/bin/env python
2 # pylint: disable=W0622,E0611
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2018
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram InputFile."""
21
22 try:
23 # python 3
24 from email.generator import _make_boundary as choose_boundary
25 except ImportError:
26 # python 2
27 from mimetools import choose_boundary
28
29 import imghdr
30 import mimetypes
31 import os
32 import sys
33
34 from telegram import TelegramError
35
36 DEFAULT_MIME_TYPE = 'application/octet-stream'
37 USER_AGENT = 'Python Telegram Bot (https://github.com/python-telegram-bot/python-telegram-bot)'
38 FILE_TYPES = ('audio', 'document', 'photo', 'sticker', 'video', 'voice', 'certificate',
39 'video_note', 'png_sticker')
40
41
42 class InputFile(object):
43 """This object represents a Telegram InputFile.
44
45 Attributes:
46 data (:obj:`dict`): Data containing an inputfile.
47
48 Args:
49 data (:obj:`dict`): Data containing an inputfile.
50
51 Raises:
52 TelegramError
53
54 """
55
56 def __init__(self, data):
57 self.data = data
58 self.boundary = choose_boundary()
59
60 for t in FILE_TYPES:
61 if t in data:
62 self.input_name = t
63 self.input_file = data.pop(t)
64 break
65 else:
66 raise TelegramError('Unknown inputfile type')
67
68 if hasattr(self.input_file, 'read'):
69 self.filename = None
70 self.input_file_content = self.input_file.read()
71 if 'filename' in data:
72 self.filename = self.data.pop('filename')
73 elif hasattr(self.input_file, 'name'):
74 # on py2.7, pylint fails to understand this properly
75 # pylint: disable=E1101
76 self.filename = os.path.basename(self.input_file.name)
77
78 try:
79 self.mimetype = self.is_image(self.input_file_content)
80 if not self.filename or '.' not in self.filename:
81 self.filename = self.mimetype.replace('/', '.')
82 except TelegramError:
83 if self.filename:
84 self.mimetype = mimetypes.guess_type(
85 self.filename)[0] or DEFAULT_MIME_TYPE
86 else:
87 self.mimetype = DEFAULT_MIME_TYPE
88
89 @property
90 def headers(self):
91 """:obj:`dict`: Headers."""
92
93 return {'User-agent': USER_AGENT, 'Content-type': self.content_type}
94
95 @property
96 def content_type(self):
97 """:obj:`str`: Content type"""
98 return 'multipart/form-data; boundary=%s' % self.boundary
99
100 def to_form(self):
101 """Transform the inputfile to multipart/form data.
102
103 Returns:
104 :obj:`str`
105
106 """
107 form = []
108 form_boundary = '--' + self.boundary
109
110 # Add data fields
111 for name in iter(self.data):
112 value = self.data[name]
113 form.extend([
114 form_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', str(value)
115 ])
116
117 # Add input_file to upload
118 form.extend([
119 form_boundary, 'Content-Disposition: form-data; name="%s"; filename="%s"' %
120 (self.input_name,
121 self.filename), 'Content-Type: %s' % self.mimetype, '', self.input_file_content
122 ])
123
124 form.append('--' + self.boundary + '--')
125 form.append('')
126
127 return self._parse(form)
128
129 @staticmethod
130 def _parse(form):
131 if sys.version_info > (3,):
132 # on Python 3 form needs to be byte encoded
133 encoded_form = []
134 for item in form:
135 try:
136 encoded_form.append(item.encode())
137 except AttributeError:
138 encoded_form.append(item)
139
140 return b'\r\n'.join(encoded_form)
141 return '\r\n'.join(form)
142
143 @staticmethod
144 def is_image(stream):
145 """Check if the content file is an image by analyzing its headers.
146
147 Args:
148 stream (:obj:`str`): A str representing the content of a file.
149
150 Returns:
151 :obj:`str`: The str mime-type of an image.
152
153 """
154 image = imghdr.what(None, stream)
155 if image:
156 return 'image/%s' % image
157
158 raise TelegramError('Could not parse file content')
159
160 @staticmethod
161 def is_inputfile(data):
162 """Check if the request is a file request.
163
164 Args:
165 data (Dict[:obj:`str`, :obj:`str`]): A dict of (str, str) key/value pairs.
166
167 Returns:
168 :obj:`bool`
169
170 """
171 if data:
172 file_type = [i for i in iter(data) if i in FILE_TYPES]
173
174 if file_type:
175 file_content = data[file_type[0]]
176
177 return hasattr(file_content, 'read')
178
179 return False
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py
--- a/telegram/files/inputfile.py
+++ b/telegram/files/inputfile.py
@@ -86,6 +86,10 @@
else:
self.mimetype = DEFAULT_MIME_TYPE
+ if sys.version_info < (3,):
+ if isinstance(self.filename, unicode): # flake8: noqa pylint: disable=E0602
+ self.filename = self.filename.encode('utf-8', 'replace')
+
@property
def headers(self):
""":obj:`dict`: Headers."""
| {"golden_diff": "diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py\n--- a/telegram/files/inputfile.py\n+++ b/telegram/files/inputfile.py\n@@ -86,6 +86,10 @@\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n \n+ if sys.version_info < (3,):\n+ if isinstance(self.filename, unicode): # flake8: noqa pylint: disable=E0602\n+ self.filename = self.filename.encode('utf-8', 'replace')\n+\n @property\n def headers(self):\n \"\"\":obj:`dict`: Headers.\"\"\"\n", "issue": "send_photo file from disk doesn't seem to work on python2\n### Steps to reproduce\r\n1. https://github.com/python-telegram-bot/python-telegram-bot/wiki/Code-snippets#post-an-image-file-from-disk\r\n\r\n2. I'm using the API and I'm getting this error:\r\n```\r\n'ascii' codec can't decode byte 0x89 in position 0: ordinal not in range(128)\r\n2018-04-24 09:49:59,039 - telegram.ext.dispatcher - ERROR - An uncaught error was raised while processing the update\r\nTraceback (most recent call last):\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\ext\\dispatcher.py\", line 279, in process_update\r\n handler.handle_update(update, self)\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\ext\\commandhandler.py\", line 173, in handle_update\r\n return self.callback(dispatcher.bot, update, **optional_args)\r\n File \"bot_status.py\", line 101, in graph_progress\r\n bot.send_photo(chat_id, open(photo, 'rb'))\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\bot.py\", line 60, in decorator\r\n result = func(self, *args, **kwargs)\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\bot.py\", line 85, in decorator\r\n result = self._request.post(url, data, timeout=kwargs.get('timeout'))\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\utils\\request.py\", line 270, in post\r\n 'POST', url, body=data.to_form(), headers=data.headers, **urlopen_kwargs)\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\files\\inputfile.py\", line 127, in to_form\r\n return self._parse(form)\r\n File \"C:\\Python27\\lib\\site-packages\\telegram\\files\\inputfile.py\", line 141, in _parse\r\n return '\\r\\n'.join(form)\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0x89 in position 0: ordinal not in range(128)\r\n```\r\n\r\n3.\r\n\r\n### Expected behaviour\r\nI was supposed to get an image\r\n\r\n### Actual behaviour\r\nThe bot raised an exception\r\n\r\nI've tested the same code on python3 and it works correctly, it seems to be a python2 only issue.\r\nIn the _parse function it seems that element form[5] is unicode which forces python to treat everything as unicode and PNG is not a valid utf8 data.\r\n\r\n### Configuration\r\nWindows 10 x64 1803\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n```\r\npython-telegram-bot 10.0.2\r\ncertifi 2018.04.16\r\nfuture 0.16.0\r\nPython 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:25:58) [MSC v.1500 64 bit (AMD64)]\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the 
implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\ntry:\n # python 3\n from email.generator import _make_boundary as choose_boundary\nexcept ImportError:\n # python 2\n from mimetools import choose_boundary\n\nimport imghdr\nimport mimetypes\nimport os\nimport sys\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\nUSER_AGENT = 'Python Telegram Bot (https://github.com/python-telegram-bot/python-telegram-bot)'\nFILE_TYPES = ('audio', 'document', 'photo', 'sticker', 'video', 'voice', 'certificate',\n 'video_note', 'png_sticker')\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n data (:obj:`dict`): Data containing an inputfile.\n\n Args:\n data (:obj:`dict`): Data containing an inputfile.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, data):\n self.data = data\n self.boundary = choose_boundary()\n\n for t in FILE_TYPES:\n if t in data:\n self.input_name = t\n self.input_file = data.pop(t)\n break\n else:\n raise TelegramError('Unknown inputfile type')\n\n if hasattr(self.input_file, 'read'):\n self.filename = None\n self.input_file_content = self.input_file.read()\n if 'filename' in data:\n self.filename = self.data.pop('filename')\n elif hasattr(self.input_file, 'name'):\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(self.input_file.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n if not self.filename or '.' 
not in self.filename:\n self.filename = self.mimetype.replace('/', '.')\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n\n @property\n def headers(self):\n \"\"\":obj:`dict`: Headers.\"\"\"\n\n return {'User-agent': USER_AGENT, 'Content-type': self.content_type}\n\n @property\n def content_type(self):\n \"\"\":obj:`str`: Content type\"\"\"\n return 'multipart/form-data; boundary=%s' % self.boundary\n\n def to_form(self):\n \"\"\"Transform the inputfile to multipart/form data.\n\n Returns:\n :obj:`str`\n\n \"\"\"\n form = []\n form_boundary = '--' + self.boundary\n\n # Add data fields\n for name in iter(self.data):\n value = self.data[name]\n form.extend([\n form_boundary, 'Content-Disposition: form-data; name=\"%s\"' % name, '', str(value)\n ])\n\n # Add input_file to upload\n form.extend([\n form_boundary, 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (self.input_name,\n self.filename), 'Content-Type: %s' % self.mimetype, '', self.input_file_content\n ])\n\n form.append('--' + self.boundary + '--')\n form.append('')\n\n return self._parse(form)\n\n @staticmethod\n def _parse(form):\n if sys.version_info > (3,):\n # on Python 3 form needs to be byte encoded\n encoded_form = []\n for item in form:\n try:\n encoded_form.append(item.encode())\n except AttributeError:\n encoded_form.append(item)\n\n return b'\\r\\n'.join(encoded_form)\n return '\\r\\n'.join(form)\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_inputfile(data):\n \"\"\"Check if the request is a file request.\n\n Args:\n data (Dict[:obj:`str`, :obj:`str`]): A dict of (str, str) key/value pairs.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if data:\n file_type = [i for i in iter(data) if i in FILE_TYPES]\n\n if file_type:\n file_content = data[file_type[0]]\n\n return hasattr(file_content, 'read')\n\n return False\n", "path": "telegram/files/inputfile.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\ntry:\n # python 3\n from email.generator import _make_boundary as choose_boundary\nexcept ImportError:\n # python 2\n from mimetools import choose_boundary\n\nimport imghdr\nimport mimetypes\nimport os\nimport sys\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\nUSER_AGENT = 'Python Telegram Bot (https://github.com/python-telegram-bot/python-telegram-bot)'\nFILE_TYPES = ('audio', 'document', 'photo', 'sticker', 'video', 'voice', 'certificate',\n 'video_note', 'png_sticker')\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n data (:obj:`dict`): Data containing an inputfile.\n\n Args:\n data (:obj:`dict`): Data containing an inputfile.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, data):\n self.data = data\n self.boundary = choose_boundary()\n\n for t in FILE_TYPES:\n if t in data:\n self.input_name = t\n self.input_file = data.pop(t)\n break\n else:\n raise TelegramError('Unknown inputfile type')\n\n if hasattr(self.input_file, 'read'):\n self.filename = None\n self.input_file_content = self.input_file.read()\n if 'filename' in data:\n self.filename = self.data.pop('filename')\n elif hasattr(self.input_file, 'name'):\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(self.input_file.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n if not self.filename or '.' not in self.filename:\n self.filename = self.mimetype.replace('/', '.')\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n\n if sys.version_info < (3,):\n if isinstance(self.filename, unicode): # flake8: noqa pylint: disable=E0602\n self.filename = self.filename.encode('utf-8', 'replace')\n\n @property\n def headers(self):\n \"\"\":obj:`dict`: Headers.\"\"\"\n\n return {'User-agent': USER_AGENT, 'Content-type': self.content_type}\n\n @property\n def content_type(self):\n \"\"\":obj:`str`: Content type\"\"\"\n return 'multipart/form-data; boundary=%s' % self.boundary\n\n def to_form(self):\n \"\"\"Transform the inputfile to multipart/form data.\n\n Returns:\n :obj:`str`\n\n \"\"\"\n form = []\n form_boundary = '--' + self.boundary\n\n # Add data fields\n for name in iter(self.data):\n value = self.data[name]\n form.extend([\n form_boundary, 'Content-Disposition: form-data; name=\"%s\"' % name, '', str(value)\n ])\n\n # Add input_file to upload\n form.extend([\n form_boundary, 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (self.input_name,\n self.filename), 'Content-Type: %s' % self.mimetype, '', self.input_file_content\n ])\n\n form.append('--' + self.boundary + '--')\n form.append('')\n\n return self._parse(form)\n\n @staticmethod\n def _parse(form):\n if sys.version_info > (3,):\n # on Python 3 form needs to be byte encoded\n encoded_form = []\n for item in form:\n try:\n encoded_form.append(item.encode())\n except AttributeError:\n encoded_form.append(item)\n\n return b'\\r\\n'.join(encoded_form)\n return '\\r\\n'.join(form)\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n 
\"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_inputfile(data):\n \"\"\"Check if the request is a file request.\n\n Args:\n data (Dict[:obj:`str`, :obj:`str`]): A dict of (str, str) key/value pairs.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if data:\n file_type = [i for i in iter(data) if i in FILE_TYPES]\n\n if file_type:\n file_content = data[file_type[0]]\n\n return hasattr(file_content, 'read')\n\n return False\n", "path": "telegram/files/inputfile.py"}]} | 2,652 | 134 |
gh_patches_debug_29472 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undesirable record grouping behaviours
## Description
Record grouping has a set of behaviours that are not desirable.
* It considers order_by, which leads to the formation of an incorrect query on the backend if we don't group by the sorted column.

* It considers limit and offset. These apply to the grouped result itself and are unrelated to the record limit & offset.


## Expected behavior
* It should not consider order_by.
* It should not consider limit and offset.
We could also probably have a dedicated API for this. It could also obtain the values for columns, to filter the grouped results. Having it as part of the records API makes less sense, since the group count is not a reflection of the record results.
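One possible shape for such a group-count query, sketched with the SQLAlchemy 1.4-style `select()` the code base already uses (the function name and signature are illustrative, not an existing API):
```python
from sqlalchemy import func, select


def group_counts_for_window(table, engine, group_cols, limit=None, offset=None):
    # Count groups over the same window of records the records API returns,
    # instead of pushing the record-level sort/limit into the GROUP BY itself.
    window = select(table).limit(limit).offset(offset).subquery()
    cols = [window.columns[name] for name in group_cols]
    query = select(*cols, func.count()).group_by(*cols)
    with engine.begin() as conn:
        return {tuple(row[:-1]): row[-1] for row in conn.execute(query)}
```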
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/pagination.py`
Content:
```
1 from collections import OrderedDict
2
3 from rest_framework.pagination import LimitOffsetPagination
4 from rest_framework.response import Response
5
6
7 class DefaultLimitOffsetPagination(LimitOffsetPagination):
8 default_limit = 50
9 max_limit = 500
10
11 def get_paginated_response(self, data):
12 return Response(OrderedDict([
13 ('count', self.count),
14 ('results', data)
15 ]))
16
17
18 class ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):
19
20 def paginate_queryset(self, queryset, request, table_id):
21 self.limit = self.get_limit(request)
22 if self.limit is None:
23 self.limit = self.default_limit
24 self.offset = self.get_offset(request)
25 table = queryset.get(id=table_id)
26 self.count = len(table.sa_columns)
27 self.request = request
28 return list(table.sa_columns)[self.offset:self.offset + self.limit]
29
30
31 class TableLimitOffsetPagination(DefaultLimitOffsetPagination):
32
33 def paginate_queryset(self, queryset, request, table_id,
34 filters=[], order_by=[]):
35 self.limit = self.get_limit(request)
36 if self.limit is None:
37 self.limit = self.default_limit
38 self.offset = self.get_offset(request)
39 # TODO: Cache count value somewhere, since calculating it is expensive.
40 table = queryset.get(id=table_id)
41 self.count = table.sa_num_records(filters=filters)
42 self.request = request
43
44 return table.get_records(
45 self.limit, self.offset, filters=filters, order_by=order_by,
46 )
47
48
49 class TableLimitOffsetGroupPagination(TableLimitOffsetPagination):
50 def get_paginated_response(self, data):
51 return Response(OrderedDict([
52 ('count', self.count),
53 ('group_count', self.group_count),
54 ('results', data)
55 ]))
56
57 def paginate_queryset(self, queryset, request, table_id,
58 filters=[], order_by=[], group_count_by=[]):
59 records = super().paginate_queryset(
60 queryset, request, table_id, filters=filters, order_by=order_by
61 )
62
63 table = queryset.get(id=table_id)
64 if group_count_by:
65 group_count = table.get_group_counts(
66 group_count_by, self.limit, self.offset,
67 filters=filters, order_by=order_by
68 )
69 # Convert the tuple keys into strings so it can be converted to JSON
70 group_count = {','.join(k): v for k, v in group_count.items()}
71 self.group_count = {
72 'group_count_by': group_count_by,
73 'results': group_count,
74 }
75 else:
76 self.group_count = {
77 'group_count_by': None,
78 'results': None,
79 }
80
81 return records
82
```
Path: `db/records.py`
Content:
```
1 import logging
2 from sqlalchemy import delete, select, Column, func
3 from sqlalchemy.inspection import inspect
4 from sqlalchemy_filters import apply_filters, apply_sort
5 from sqlalchemy_filters.exceptions import FieldNotFound
6
7 from db.constants import ID
8
9 logger = logging.getLogger(__name__)
10
11
12 # Grouping exceptions follow the sqlalchemy_filters exceptions patterns
13 class BadGroupFormat(Exception):
14 pass
15
16
17 class GroupFieldNotFound(FieldNotFound):
18 pass
19
20
21 def _get_primary_key_column(table):
22 primary_key_list = list(inspect(table).primary_key)
23 # We do not support getting by composite primary keys
24 assert len(primary_key_list) == 1
25 return primary_key_list[0]
26
27
28 def _create_col_objects(table, column_list):
29 return [
30 table.columns[col] if type(col) == str else col
31 for col in column_list
32 ]
33
34
35 def get_record(table, engine, id_value):
36 primary_key_column = _get_primary_key_column(table)
37 query = select(table).where(primary_key_column == id_value)
38 with engine.begin() as conn:
39 result = conn.execute(query).fetchall()
40 assert len(result) <= 1
41 return result[0] if result else None
42
43
44 def get_records(
45 table, engine, limit=None, offset=None, order_by=[], filters=[],
46 ):
47 """
48 Returns records from a table.
49
50 Args:
51 table: SQLAlchemy table object
52 engine: SQLAlchemy engine object
53 limit: int, gives number of rows to return
54 offset: int, gives number of rows to skip
55 order_by: list of dictionaries, where each dictionary has a 'field' and
56 'direction' field.
57 See: https://github.com/centerofci/sqlalchemy-filters#sort-format
58 filters: list of dictionaries, where each dictionary has a 'field' and 'op'
59 field, in addition to an 'value' field if appropriate.
60 See: https://github.com/centerofci/sqlalchemy-filters#filters-format
61 """
62 query = select(table).limit(limit).offset(offset)
63 if order_by is not None:
64 query = apply_sort(query, order_by)
65 if filters is not None:
66 query = apply_filters(query, filters)
67 with engine.begin() as conn:
68 return conn.execute(query).fetchall()
69
70
71 def get_group_counts(
72 table, engine, group_by, limit=None, offset=None, order_by=[], filters=[],
73 ):
74 """
75 Returns counts by specified groupings
76
77 Args:
78 table: SQLAlchemy table object
79 engine: SQLAlchemy engine object
80 limit: int, gives number of rows to return
81 offset: int, gives number of rows to skip
82 group_by: list or tuple of column names or column objects to group by
83 order_by: list of dictionaries, where each dictionary has a 'field' and
84 'direction' field.
85 See: https://github.com/centerofci/sqlalchemy-filters#sort-format
86 filters: list of dictionaries, where each dictionary has a 'field' and 'op'
87 field, in addition to an 'value' field if appropriate.
88 See: https://github.com/centerofci/sqlalchemy-filters#filters-format
89 """
90 if type(group_by) not in (tuple, list):
91 raise BadGroupFormat(f"Group spec {group_by} must be list or tuple.")
92 for field in group_by:
93 if type(field) not in (str, Column):
94 raise BadGroupFormat(f"Group field {field} must be a string or Column.")
95 field_name = field if type(field) == str else field.name
96 if field_name not in table.c:
97 raise GroupFieldNotFound(f"Group field {field} not found in {table}.")
98
99 group_by = _create_col_objects(table, group_by)
100 query = (
101 select(*group_by, func.count(table.c[ID]))
102 .group_by(*group_by)
103 .limit(limit)
104 .offset(offset)
105 )
106 if order_by is not None:
107 query = apply_sort(query, order_by)
108 if filters is not None:
109 query = apply_filters(query, filters)
110 with engine.begin() as conn:
111 records = conn.execute(query).fetchall()
112
113 # Last field is the count, preceding fields are the group by fields
114 counts = {
115 (*record[:-1],): record[-1]
116 for record in records
117 }
118 return counts
119
120
121 def get_distinct_tuple_values(
122 column_list, engine, table=None, limit=None, offset=None,
123 ):
124 """
125 Returns distinct tuples from a given list of columns.
126
127 Args:
128 column_list: list of column names or SQLAlchemy column objects
129 engine: SQLAlchemy engine object
130 table: SQLAlchemy table object
131 limit: int, gives number of rows to return
132 offset: int, gives number of rows to skip
133
134 If no table is given, the column_list must consist entirely of
135 SQLAlchemy column objects associated with a table.
136 """
137 if table is not None:
138 column_objects = _create_col_objects(table, column_list)
139 else:
140 column_objects = column_list
141 try:
142 assert all([type(col) == Column for col in column_objects])
143 except AssertionError as e:
144 logger.error("All columns must be str or sqlalchemy.Column type")
145 raise e
146
147 query = (
148 select(*column_objects)
149 .distinct()
150 .limit(limit)
151 .offset(offset)
152 )
153 with engine.begin() as conn:
154 res = conn.execute(query).fetchall()
155 return [tuple(zip(column_objects, row)) for row in res]
156
157
158 def distinct_tuples_to_filter(distinct_tuples):
159 filters = []
160 for col, value in distinct_tuples:
161 filters.append({
162 "field": col,
163 "op": "==",
164 "value": value,
165 })
166 return filters
167
168
169 def create_record_or_records(table, engine, record_data):
170 """
171 record_data can be a dictionary, tuple, or list of dictionaries or tuples.
172 if record_data is a list, it creates multiple records.
173 """
174 id_value = None
175 with engine.begin() as connection:
176 result = connection.execute(table.insert(), record_data)
177 # If there was only a single record created, return the record.
178 if result.rowcount == 1:
179 # We need to manually commit insertion so that we can retrieve the record.
180 connection.commit()
181 id_value = result.inserted_primary_key[0]
182 if id_value is not None:
183 return get_record(table, engine, id_value)
184 # Do not return any records if multiple rows were added.
185 return None
186
187
188 def create_records_from_csv(table, engine, csv_filename, column_names, delimiter=None,
189 escape=None, quote=None):
190 with open(csv_filename, 'rb') as csv_file:
191 with engine.begin() as conn:
192 cursor = conn.connection.cursor()
193 relation = '.'.join('"{}"'.format(part) for part in (table.schema, table.name))
194 formatted_columns = '({})'.format(','.join([f'"{column_name}"' for column_name in column_names]))
195
196 copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'
197 if delimiter:
198 copy_sql += f" DELIMITER E'{delimiter}'"
199 if escape:
200 copy_sql += f" ESCAPE '{escape}'"
201 if quote:
202 if quote == "'":
203 quote = "''"
204 copy_sql += f" QUOTE '{quote}'"
205
206 cursor.copy_expert(copy_sql, csv_file)
207
208
209 def update_record(table, engine, id_value, record_data):
210 primary_key_column = _get_primary_key_column(table)
211 with engine.begin() as connection:
212 connection.execute(
213 table.update().where(primary_key_column == id_value).values(record_data)
214 )
215 return get_record(table, engine, id_value)
216
217
218 def delete_record(table, engine, id_value):
219 primary_key_column = _get_primary_key_column(table)
220 query = delete(table).where(primary_key_column == id_value)
221 with engine.begin() as conn:
222 return conn.execute(query)
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/records.py b/db/records.py
--- a/db/records.py
+++ b/db/records.py
@@ -96,10 +96,8 @@
if field_name not in table.c:
raise GroupFieldNotFound(f"Group field {field} not found in {table}.")
- group_by = _create_col_objects(table, group_by)
query = (
- select(*group_by, func.count(table.c[ID]))
- .group_by(*group_by)
+ select(table)
.limit(limit)
.offset(offset)
)
@@ -107,6 +105,13 @@
query = apply_sort(query, order_by)
if filters is not None:
query = apply_filters(query, filters)
+ subquery = query.subquery()
+
+ group_by = [
+ subquery.columns[col] if type(col) == str else subquery.columns[col.name]
+ for col in group_by
+ ]
+ query = select(*group_by, func.count(subquery.c[ID])).group_by(*group_by)
with engine.begin() as conn:
records = conn.execute(query).fetchall()
diff --git a/mathesar/pagination.py b/mathesar/pagination.py
--- a/mathesar/pagination.py
+++ b/mathesar/pagination.py
@@ -67,7 +67,8 @@
filters=filters, order_by=order_by
)
# Convert the tuple keys into strings so it can be converted to JSON
- group_count = {','.join(k): v for k, v in group_count.items()}
+ group_count = [{"values": list(cols), "count": count}
+ for cols, count in group_count.items()]
self.group_count = {
'group_count_by': group_count_by,
'results': group_count,
| {"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -96,10 +96,8 @@\n if field_name not in table.c:\n raise GroupFieldNotFound(f\"Group field {field} not found in {table}.\")\n \n- group_by = _create_col_objects(table, group_by)\n query = (\n- select(*group_by, func.count(table.c[ID]))\n- .group_by(*group_by)\n+ select(table)\n .limit(limit)\n .offset(offset)\n )\n@@ -107,6 +105,13 @@\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n+ subquery = query.subquery()\n+\n+ group_by = [\n+ subquery.columns[col] if type(col) == str else subquery.columns[col.name]\n+ for col in group_by\n+ ]\n+ query = select(*group_by, func.count(subquery.c[ID])).group_by(*group_by)\n with engine.begin() as conn:\n records = conn.execute(query).fetchall()\n \ndiff --git a/mathesar/pagination.py b/mathesar/pagination.py\n--- a/mathesar/pagination.py\n+++ b/mathesar/pagination.py\n@@ -67,7 +67,8 @@\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n- group_count = {','.join(k): v for k, v in group_count.items()}\n+ group_count = [{\"values\": list(cols), \"count\": count}\n+ for cols, count in group_count.items()]\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n", "issue": "Undesirable record grouping behaviours\n## Description\r\nRecord grouping has a set of behaviours, that are not desirable.\r\n* It considers order_by, which leads to formation of incorrect query on the backend, if we don't group by the sorted column.\r\n\r\n\r\n* It considers limit and offset. These apply on the grouped result itself, and is unrelated to the record limit & offset.\r\n\r\n\r\n\r\n\r\n## Expected behavior\r\n* It should not consider order_by.\r\n* It should not consider limit and offset.\r\n\r\nWe could also probably have a dedicated API for this. It could also obtain the values for columns, to filter the grouped results. 
Having it as part of records API makes less sense, since the group count is not a reflection of the record results.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\nclass DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n max_limit = 500\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('results', data)\n ]))\n\n\nclass ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n table = queryset.get(id=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n\n\nclass TableLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[]):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n table = queryset.get(id=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n\n return table.get_records(\n self.limit, self.offset, filters=filters, order_by=order_by,\n )\n\n\nclass TableLimitOffsetGroupPagination(TableLimitOffsetPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('group_count', self.group_count),\n ('results', data)\n ]))\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[], group_count_by=[]):\n records = super().paginate_queryset(\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n\n table = queryset.get(id=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n group_count = {','.join(k): v for k, v in group_count.items()}\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n }\n else:\n self.group_count = {\n 'group_count_by': None,\n 'results': None,\n }\n\n return records\n", "path": "mathesar/pagination.py"}, {"content": "import logging\nfrom sqlalchemy import delete, select, Column, func\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_filters import apply_filters, apply_sort\nfrom sqlalchemy_filters.exceptions import FieldNotFound\n\nfrom db.constants import ID\n\nlogger = logging.getLogger(__name__)\n\n\n# Grouping exceptions follow the sqlalchemy_filters exceptions patterns\nclass BadGroupFormat(Exception):\n pass\n\n\nclass GroupFieldNotFound(FieldNotFound):\n pass\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef _create_col_objects(table, column_list):\n return [\n table.columns[col] if type(col) == str else col\n for col in column_list\n ]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with 
engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(\n table, engine, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n query = select(table).limit(limit).offset(offset)\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef get_group_counts(\n table, engine, group_by, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns counts by specified groupings\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n group_by: list or tuple of column names or column objects to group by\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n if type(group_by) not in (tuple, list):\n raise BadGroupFormat(f\"Group spec {group_by} must be list or tuple.\")\n for field in group_by:\n if type(field) not in (str, Column):\n raise BadGroupFormat(f\"Group field {field} must be a string or Column.\")\n field_name = field if type(field) == str else field.name\n if field_name not in table.c:\n raise GroupFieldNotFound(f\"Group field {field} not found in {table}.\")\n\n group_by = _create_col_objects(table, group_by)\n query = (\n select(*group_by, func.count(table.c[ID]))\n .group_by(*group_by)\n .limit(limit)\n .offset(offset)\n )\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n with engine.begin() as conn:\n records = conn.execute(query).fetchall()\n\n # Last field is the count, preceding fields are the group by fields\n counts = {\n (*record[:-1],): record[-1]\n for record in records\n }\n return counts\n\n\ndef get_distinct_tuple_values(\n column_list, engine, table=None, limit=None, offset=None,\n):\n \"\"\"\n Returns distinct tuples from a given list of columns.\n\n Args:\n column_list: list of column names or SQLAlchemy column objects\n engine: SQLAlchemy engine object\n table: SQLAlchemy table object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n\n If no table is given, the column_list must consist entirely of\n SQLAlchemy column objects associated with a table.\n \"\"\"\n if table is not None:\n column_objects = _create_col_objects(table, column_list)\n else:\n column_objects = column_list\n try:\n assert all([type(col) == Column for col in column_objects])\n except AssertionError as e:\n 
logger.error(\"All columns must be str or sqlalchemy.Column type\")\n raise e\n\n query = (\n select(*column_objects)\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n with engine.begin() as conn:\n res = conn.execute(query).fetchall()\n return [tuple(zip(column_objects, row)) for row in res]\n\n\ndef distinct_tuples_to_filter(distinct_tuples):\n filters = []\n for col, value in distinct_tuples:\n filters.append({\n \"field\": col,\n \"op\": \"==\",\n \"value\": value,\n })\n return filters\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names, delimiter=None,\n escape=None, quote=None):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n if delimiter:\n copy_sql += f\" DELIMITER E'{delimiter}'\"\n if escape:\n copy_sql += f\" ESCAPE '{escape}'\"\n if quote:\n if quote == \"'\":\n quote = \"''\"\n copy_sql += f\" QUOTE '{quote}'\"\n\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\nclass DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n max_limit = 500\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('results', data)\n ]))\n\n\nclass ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n table = queryset.get(id=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n\n\nclass TableLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[]):\n self.limit = self.get_limit(request)\n if 
self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n table = queryset.get(id=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n\n return table.get_records(\n self.limit, self.offset, filters=filters, order_by=order_by,\n )\n\n\nclass TableLimitOffsetGroupPagination(TableLimitOffsetPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('group_count', self.group_count),\n ('results', data)\n ]))\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[], group_count_by=[]):\n records = super().paginate_queryset(\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n\n table = queryset.get(id=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n group_count = [{\"values\": list(cols), \"count\": count}\n for cols, count in group_count.items()]\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n }\n else:\n self.group_count = {\n 'group_count_by': None,\n 'results': None,\n }\n\n return records\n", "path": "mathesar/pagination.py"}, {"content": "import logging\nfrom sqlalchemy import delete, select, Column, func\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_filters import apply_filters, apply_sort\nfrom sqlalchemy_filters.exceptions import FieldNotFound\n\nfrom db.constants import ID\n\nlogger = logging.getLogger(__name__)\n\n\n# Grouping exceptions follow the sqlalchemy_filters exceptions patterns\nclass BadGroupFormat(Exception):\n pass\n\n\nclass GroupFieldNotFound(FieldNotFound):\n pass\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef _create_col_objects(table, column_list):\n return [\n table.columns[col] if type(col) == str else col\n for col in column_list\n ]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(\n table, engine, limit=None, offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n query = select(table).limit(limit).offset(offset)\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef get_group_counts(\n table, engine, group_by, limit=None, 
offset=None, order_by=[], filters=[],\n):\n \"\"\"\n Returns counts by specified groupings\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n group_by: list or tuple of column names or column objects to group by\n order_by: list of dictionaries, where each dictionary has a 'field' and\n 'direction' field.\n See: https://github.com/centerofci/sqlalchemy-filters#sort-format\n filters: list of dictionaries, where each dictionary has a 'field' and 'op'\n field, in addition to an 'value' field if appropriate.\n See: https://github.com/centerofci/sqlalchemy-filters#filters-format\n \"\"\"\n if type(group_by) not in (tuple, list):\n raise BadGroupFormat(f\"Group spec {group_by} must be list or tuple.\")\n for field in group_by:\n if type(field) not in (str, Column):\n raise BadGroupFormat(f\"Group field {field} must be a string or Column.\")\n field_name = field if type(field) == str else field.name\n if field_name not in table.c:\n raise GroupFieldNotFound(f\"Group field {field} not found in {table}.\")\n\n query = (\n select(table)\n .limit(limit)\n .offset(offset)\n )\n if order_by is not None:\n query = apply_sort(query, order_by)\n if filters is not None:\n query = apply_filters(query, filters)\n subquery = query.subquery()\n\n group_by = [\n subquery.columns[col] if type(col) == str else subquery.columns[col.name]\n for col in group_by\n ]\n query = select(*group_by, func.count(subquery.c[ID])).group_by(*group_by)\n with engine.begin() as conn:\n records = conn.execute(query).fetchall()\n\n # Last field is the count, preceding fields are the group by fields\n counts = {\n (*record[:-1],): record[-1]\n for record in records\n }\n return counts\n\n\ndef get_distinct_tuple_values(\n column_list, engine, table=None, limit=None, offset=None,\n):\n \"\"\"\n Returns distinct tuples from a given list of columns.\n\n Args:\n column_list: list of column names or SQLAlchemy column objects\n engine: SQLAlchemy engine object\n table: SQLAlchemy table object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n\n If no table is given, the column_list must consist entirely of\n SQLAlchemy column objects associated with a table.\n \"\"\"\n if table is not None:\n column_objects = _create_col_objects(table, column_list)\n else:\n column_objects = column_list\n try:\n assert all([type(col) == Column for col in column_objects])\n except AssertionError as e:\n logger.error(\"All columns must be str or sqlalchemy.Column type\")\n raise e\n\n query = (\n select(*column_objects)\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n with engine.begin() as conn:\n res = conn.execute(query).fetchall()\n return [tuple(zip(column_objects, row)) for row in res]\n\n\ndef distinct_tuples_to_filter(distinct_tuples):\n filters = []\n for col, value in distinct_tuples:\n filters.append({\n \"field\": col,\n \"op\": \"==\",\n \"value\": value,\n })\n return filters\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n 
connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names, delimiter=None,\n escape=None, quote=None):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n if delimiter:\n copy_sql += f\" DELIMITER E'{delimiter}'\"\n if escape:\n copy_sql += f\" ESCAPE '{escape}'\"\n if quote:\n if quote == \"'\":\n quote = \"''\"\n copy_sql += f\" QUOTE '{quote}'\"\n\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]} | 3,701 | 399 |
gh_patches_debug_22504 | rasdani/github-patches | git_diff | wright-group__WrightTools-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows Tempfile Error
On Windows, tempfiles cause errors when h5py attempts to open them.
I do not have the error message in front of me at present, but I believe it was a 'file already exists' flavor of problem.
We may need to remove the created tmpfile and just use the name....
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/_base.py`
Content:
```
1 """WrightTools base classes and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import shutil
8 import weakref
9 import tempfile
10 import posixpath
11
12 import numpy as np
13
14 import h5py
15
16
17 # --- define --------------------------------------------------------------------------------------
18
19
20 wt5_version = '0.0.0'
21
22
23 # --- dataset -------------------------------------------------------------------------------------
24
25
26 class Dataset(h5py.Dataset):
27 instances = {}
28
29
30 # --- group ---------------------------------------------------------------------------------------
31
32
33 class Group(h5py.Group):
34 instances = {}
35 class_name = 'Group'
36
37 def __init__(self, filepath=None, parent=None, name=None, **kwargs):
38 if filepath is None:
39 return
40 if parent == '':
41 parent = posixpath.sep
42 # file
43 self.filepath = filepath
44 path = parent + posixpath.sep + name
45 file = h5py.File(self.filepath, 'a')
46 file.require_group(parent)
47 file.require_group(path)
48 h5py.Group.__init__(self, bind=file[path].id)
49 self.__n = 0
50 self.fid = self.file.fid
51 if name is not None:
52 self.attrs['name'] = name
53 self.attrs.update(kwargs)
54 self.attrs['class'] = self.class_name
55 # load from file
56 self._items = []
57 for name in self.item_names:
58 self._items.append(self[name])
59 setattr(self, name, self[name])
60 # kwargs
61 self.attrs.update(kwargs)
62 # the following are populated if not already recorded
63 self.__version__
64 self.natural_name
65
66 def __new__(cls, *args, **kwargs):
67 # extract
68 filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)
69 parent = args[1] if len(args) > 1 else kwargs.get('parent', None)
70 name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())
71 edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)
72 # tempfile
73 tmpfile = None
74 if edit_local and filepath is None:
75 raise Exception # TODO: better exception
76 if not edit_local:
77 tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')
78 p = tmpfile.name
79 if filepath:
80 shutil.copyfile(src=filepath, dst=p)
81 elif edit_local and filepath:
82 p = filepath
83 # construct fullpath
84 if parent is None:
85 parent = ''
86 name = '/'
87 fullpath = p + '::' + parent + name
88 # create and/or return
89 if fullpath not in cls.instances.keys():
90 kwargs['filepath'] = p
91 kwargs['parent'] = parent
92 kwargs['name'] = name
93 instance = super(Group, cls).__new__(cls)
94 cls.__init__(instance, **kwargs)
95 cls.instances[fullpath] = instance
96 if tmpfile:
97 setattr(instance, '_tmpfile', tmpfile)
98 weakref.finalize(instance, instance.close)
99 return instance
100 instance = cls.instances[fullpath]
101 return instance
102
103 @property
104 def __version__(self):
105 if '__version__' not in self.file.attrs.keys():
106 self.file.attrs['__version__'] = wt5_version
107 return self.file.attrs['__version__']
108
109 @property
110 def fullpath(self):
111 return self.filepath + '::' + self.name
112
113 @property
114 def item_names(self):
115 if 'item_names' not in self.attrs.keys():
116 self.attrs['item_names'] = np.array([], dtype='S')
117 return self.attrs['item_names']
118
119 @property
120 def natural_name(self):
121 if 'name' not in self.attrs.keys():
122 self.attrs['name'] = self.__class__.default_name
123 return self.attrs['name']
124
125 @property
126 def parent(self):
127 from .collection import Collection
128 group = super().parent
129 parent = group.parent.name
130 if parent == posixpath.sep:
131 parent = None
132 return Collection(self.filepath, parent=parent, name=group.attrs['name'])
133
134 def close(self):
135 if(self.fid.valid > 0):
136 self.__class__.instances.pop(self.fullpath)
137 self.file.flush()
138 self.file.close()
139 if hasattr(self, '_tmpfile'):
140 self._tmpfile.close()
141
142 def flush(self):
143 self.file.flush()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/_base.py b/WrightTools/_base.py
--- a/WrightTools/_base.py
+++ b/WrightTools/_base.py
@@ -5,6 +5,7 @@
import shutil
+import os
import weakref
import tempfile
import posixpath
@@ -74,8 +75,8 @@
if edit_local and filepath is None:
raise Exception # TODO: better exception
if not edit_local:
- tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')
- p = tmpfile.name
+ tmpfile = tempfile.mkstemp(prefix='', suffix='.wt5')
+ p = tmpfile[1]
if filepath:
shutil.copyfile(src=filepath, dst=p)
elif edit_local and filepath:
@@ -137,7 +138,8 @@
self.file.flush()
self.file.close()
if hasattr(self, '_tmpfile'):
- self._tmpfile.close()
+ os.close(self._tmpfile[0])
+ os.remove(self._tmpfile[1])
def flush(self):
self.file.flush()
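For readers who want to try the pattern outside WrightTools, below is a minimal, standard-library-only sketch of the `tempfile.mkstemp` lifecycle that the patch above switches to. The `open()` call merely stands in for `h5py.File(path, 'a')`; nothing here is part of the WrightTools API.

```python
import os
import tempfile

# mkstemp() returns an already-open OS-level descriptor plus a path, so a
# second library can reopen the file by name -- on Windows this avoids the
# sharing / "file already exists" errors that NamedTemporaryFile can trigger.
fd, path = tempfile.mkstemp(prefix='', suffix='.wt5')
try:
    # hand `path` to whatever needs to open the file itself,
    # e.g. h5py.File(path, 'a') in Group.__init__ above
    with open(path, 'r+b') as f:
        f.write(b'\x00')
finally:
    os.close(fd)     # close the descriptor mkstemp gave us
    os.remove(path)  # and delete the temporary file explicitly
```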
| {"golden_diff": "diff --git a/WrightTools/_base.py b/WrightTools/_base.py\n--- a/WrightTools/_base.py\n+++ b/WrightTools/_base.py\n@@ -5,6 +5,7 @@\n \n \n import shutil\n+import os\n import weakref\n import tempfile\n import posixpath\n@@ -74,8 +75,8 @@\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n- tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')\n- p = tmpfile.name\n+ tmpfile = tempfile.mkstemp(prefix='', suffix='.wt5')\n+ p = tmpfile[1]\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n@@ -137,7 +138,8 @@\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n- self._tmpfile.close()\n+ os.close(self._tmpfile[0])\n+ os.remove(self._tmpfile[1])\n \n def flush(self):\n self.file.flush()\n", "issue": "Windows Tempfile Error\nOn Windows, tempfiles attempted to be opened using h5py cause errors.\r\n\r\nI do not have the error message in front of me at present, but I believe it was a 'file already exists' flavor of problem. \r\n\r\nWe may need to remove the created tmpfile and just use the name....\n", "before_files": [{"content": "\"\"\"WrightTools base classes and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport shutil\nimport weakref\nimport tempfile\nimport posixpath\n\nimport numpy as np\n\nimport h5py\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\nwt5_version = '0.0.0'\n\n\n# --- dataset -------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n instances = {}\n\n\n# --- group ---------------------------------------------------------------------------------------\n\n\nclass Group(h5py.Group):\n instances = {}\n class_name = 'Group'\n\n def __init__(self, filepath=None, parent=None, name=None, **kwargs):\n if filepath is None:\n return\n if parent == '':\n parent = posixpath.sep\n # file\n self.filepath = filepath\n path = parent + posixpath.sep + name\n file = h5py.File(self.filepath, 'a')\n file.require_group(parent)\n file.require_group(path)\n h5py.Group.__init__(self, bind=file[path].id)\n self.__n = 0\n self.fid = self.file.fid\n if name is not None:\n self.attrs['name'] = name\n self.attrs.update(kwargs)\n self.attrs['class'] = self.class_name\n # load from file\n self._items = []\n for name in self.item_names:\n self._items.append(self[name])\n setattr(self, name, self[name])\n # kwargs\n self.attrs.update(kwargs)\n # the following are populated if not already recorded\n self.__version__\n self.natural_name\n\n def __new__(cls, *args, **kwargs):\n # extract\n filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)\n parent = args[1] if len(args) > 1 else kwargs.get('parent', None)\n name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())\n edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)\n # tempfile\n tmpfile = None\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n tmpfile = tempfile.NamedTemporaryFile(prefix='', suffix='.wt5')\n p = tmpfile.name\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n p = filepath\n # construct fullpath\n if parent is None:\n parent = ''\n name = '/'\n fullpath = p + '::' + parent + name\n # create and/or return\n if fullpath not in cls.instances.keys():\n 
kwargs['filepath'] = p\n kwargs['parent'] = parent\n kwargs['name'] = name\n instance = super(Group, cls).__new__(cls)\n cls.__init__(instance, **kwargs)\n cls.instances[fullpath] = instance\n if tmpfile:\n setattr(instance, '_tmpfile', tmpfile)\n weakref.finalize(instance, instance.close)\n return instance\n instance = cls.instances[fullpath]\n return instance\n\n @property\n def __version__(self):\n if '__version__' not in self.file.attrs.keys():\n self.file.attrs['__version__'] = wt5_version\n return self.file.attrs['__version__']\n\n @property\n def fullpath(self):\n return self.filepath + '::' + self.name\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return self.attrs['item_names']\n\n @property\n def natural_name(self):\n if 'name' not in self.attrs.keys():\n self.attrs['name'] = self.__class__.default_name\n return self.attrs['name']\n\n @property\n def parent(self):\n from .collection import Collection\n group = super().parent\n parent = group.parent.name\n if parent == posixpath.sep:\n parent = None\n return Collection(self.filepath, parent=parent, name=group.attrs['name'])\n\n def close(self):\n if(self.fid.valid > 0):\n self.__class__.instances.pop(self.fullpath)\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n self._tmpfile.close()\n\n def flush(self):\n self.file.flush()\n", "path": "WrightTools/_base.py"}], "after_files": [{"content": "\"\"\"WrightTools base classes and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport shutil\nimport os\nimport weakref\nimport tempfile\nimport posixpath\n\nimport numpy as np\n\nimport h5py\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\nwt5_version = '0.0.0'\n\n\n# --- dataset -------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n instances = {}\n\n\n# --- group ---------------------------------------------------------------------------------------\n\n\nclass Group(h5py.Group):\n instances = {}\n class_name = 'Group'\n\n def __init__(self, filepath=None, parent=None, name=None, **kwargs):\n if filepath is None:\n return\n if parent == '':\n parent = posixpath.sep\n # file\n self.filepath = filepath\n path = parent + posixpath.sep + name\n file = h5py.File(self.filepath, 'a')\n file.require_group(parent)\n file.require_group(path)\n h5py.Group.__init__(self, bind=file[path].id)\n self.__n = 0\n self.fid = self.file.fid\n if name is not None:\n self.attrs['name'] = name\n self.attrs.update(kwargs)\n self.attrs['class'] = self.class_name\n # load from file\n self._items = []\n for name in self.item_names:\n self._items.append(self[name])\n setattr(self, name, self[name])\n # kwargs\n self.attrs.update(kwargs)\n # the following are populated if not already recorded\n self.__version__\n self.natural_name\n\n def __new__(cls, *args, **kwargs):\n # extract\n filepath = args[0] if len(args) > 0 else kwargs.get('filepath', None)\n parent = args[1] if len(args) > 1 else kwargs.get('parent', None)\n name = args[2] if len(args) > 2 else kwargs.get('name', cls.class_name.lower())\n edit_local = args[3] if len(args) > 3 else kwargs.get('edit_local', False)\n # tempfile\n tmpfile = None\n if edit_local and filepath is None:\n raise Exception # TODO: better exception\n if not edit_local:\n tmpfile = tempfile.mkstemp(prefix='', 
suffix='.wt5')\n p = tmpfile[1]\n if filepath:\n shutil.copyfile(src=filepath, dst=p)\n elif edit_local and filepath:\n p = filepath\n # construct fullpath\n if parent is None:\n parent = ''\n name = '/'\n fullpath = p + '::' + parent + name\n # create and/or return\n if fullpath not in cls.instances.keys():\n kwargs['filepath'] = p\n kwargs['parent'] = parent\n kwargs['name'] = name\n instance = super(Group, cls).__new__(cls)\n cls.__init__(instance, **kwargs)\n cls.instances[fullpath] = instance\n if tmpfile:\n setattr(instance, '_tmpfile', tmpfile)\n weakref.finalize(instance, instance.close)\n return instance\n instance = cls.instances[fullpath]\n return instance\n\n @property\n def __version__(self):\n if '__version__' not in self.file.attrs.keys():\n self.file.attrs['__version__'] = wt5_version\n return self.file.attrs['__version__']\n\n @property\n def fullpath(self):\n return self.filepath + '::' + self.name\n\n @property\n def item_names(self):\n if 'item_names' not in self.attrs.keys():\n self.attrs['item_names'] = np.array([], dtype='S')\n return self.attrs['item_names']\n\n @property\n def natural_name(self):\n if 'name' not in self.attrs.keys():\n self.attrs['name'] = self.__class__.default_name\n return self.attrs['name']\n\n @property\n def parent(self):\n from .collection import Collection\n group = super().parent\n parent = group.parent.name\n if parent == posixpath.sep:\n parent = None\n return Collection(self.filepath, parent=parent, name=group.attrs['name'])\n\n def close(self):\n if(self.fid.valid > 0):\n self.__class__.instances.pop(self.fullpath)\n self.file.flush()\n self.file.close()\n if hasattr(self, '_tmpfile'):\n os.close(self._tmpfile[0])\n os.remove(self._tmpfile[1])\n\n def flush(self):\n self.file.flush()\n", "path": "WrightTools/_base.py"}]} | 1,615 | 247 |
gh_patches_debug_25508 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UploadFile causes ASGI application's exception when it is in debug mode
Apparently, when we use the ASGI application in debug mode, it cannot print the values of variables that are not JSON serializable.
In my use case, this issue came up when I tried the [file upload](https://strawberry.rocks/docs/features/file-upload) example in debug mode.
I believe it is because of this:
https://github.com/strawberry-graphql/strawberry/blob/de215370b247a417af8a8dd5fc382d71e305bcd7/strawberry/utils/debug.py#L26-L29
Perhaps converting variables to string might help.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/utils/debug.py`
Content:
```
1 import datetime
2 import json
3 import typing
4
5 from pygments import highlight, lexers
6 from pygments.formatters import Terminal256Formatter
7
8 from .graphql_lexer import GraphQLLexer
9
10
11 def pretty_print_graphql_operation(
12 operation_name: str, query: str, variables: typing.Dict["str", typing.Any]
13 ): # pragma: no cover
14 """Pretty print a GraphQL operation using pygments.
15
16 Won't print introspection operation to prevent noise in the output."""
17
18 if operation_name == "IntrospectionQuery":
19 return
20
21 now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
22
23 print(f"[{now}]: {operation_name or 'No operation name'}")
24 print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
25
26 if variables:
27 variables_json = json.dumps(variables, indent=4)
28
29 print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py
--- a/strawberry/utils/debug.py
+++ b/strawberry/utils/debug.py
@@ -1,6 +1,7 @@
import datetime
import json
-import typing
+from json import JSONEncoder
+from typing import Any, Dict
from pygments import highlight, lexers
from pygments.formatters import Terminal256Formatter
@@ -8,9 +9,14 @@
from .graphql_lexer import GraphQLLexer
+class StrawberryJSONEncoder(JSONEncoder):
+ def default(self, o: Any) -> Any:
+ return repr(o)
+
+
def pretty_print_graphql_operation(
- operation_name: str, query: str, variables: typing.Dict["str", typing.Any]
-): # pragma: no cover
+ operation_name: str, query: str, variables: Dict["str", Any]
+):
"""Pretty print a GraphQL operation using pygments.
Won't print introspection operation to prevent noise in the output."""
@@ -24,6 +30,6 @@
print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
if variables:
- variables_json = json.dumps(variables, indent=4)
+ variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)
print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
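As a quick illustration of the encoder fallback introduced above, here is a self-contained sketch. The `ReprFallbackEncoder` and `FakeUpload` names are made up for the example; the class actually added by the patch is `StrawberryJSONEncoder`.

```python
import json
from json import JSONEncoder

class ReprFallbackEncoder(JSONEncoder):
    # json.dumps() calls default() only for values it cannot serialize,
    # so plain dicts/strings/numbers are unaffected and anything else
    # (e.g. an uploaded file object) is rendered via repr().
    def default(self, o):
        return repr(o)

class FakeUpload:
    def __repr__(self):
        return "<Upload filename='cat.png'>"

variables = {"folderId": 1, "file": FakeUpload()}
print(json.dumps(variables, indent=4, cls=ReprFallbackEncoder))
```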
| {"golden_diff": "diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py\n--- a/strawberry/utils/debug.py\n+++ b/strawberry/utils/debug.py\n@@ -1,6 +1,7 @@\n import datetime\n import json\n-import typing\n+from json import JSONEncoder\n+from typing import Any, Dict\n \n from pygments import highlight, lexers\n from pygments.formatters import Terminal256Formatter\n@@ -8,9 +9,14 @@\n from .graphql_lexer import GraphQLLexer\n \n \n+class StrawberryJSONEncoder(JSONEncoder):\n+ def default(self, o: Any) -> Any:\n+ return repr(o)\n+\n+\n def pretty_print_graphql_operation(\n- operation_name: str, query: str, variables: typing.Dict[\"str\", typing.Any]\n-): # pragma: no cover\n+ operation_name: str, query: str, variables: Dict[\"str\", Any]\n+):\n \"\"\"Pretty print a GraphQL operation using pygments.\n \n Won't print introspection operation to prevent noise in the output.\"\"\"\n@@ -24,6 +30,6 @@\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n \n if variables:\n- variables_json = json.dumps(variables, indent=4)\n+ variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n \n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "issue": "UploadFile causes ASGI application's exception when it is in debug mode\nApparently when we use the ASGI application in debug mode, it cannot print the value of variables if they are not JSON serializable.\r\n\r\nIn my use case, when I tried to use the [file upload](https://strawberry.rocks/docs/features/file-upload) example in debug mode this issue ended up happening.\r\n\r\nI believe it is because of this:\r\n\r\nhttps://github.com/strawberry-graphql/strawberry/blob/de215370b247a417af8a8dd5fc382d71e305bcd7/strawberry/utils/debug.py#L26-L29\r\n\r\nPerhaps converting variables to string might help.\n", "before_files": [{"content": "import datetime\nimport json\nimport typing\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\ndef pretty_print_graphql_operation(\n operation_name: str, query: str, variables: typing.Dict[\"str\", typing.Any]\n): # pragma: no cover\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}], "after_files": [{"content": "import datetime\nimport json\nfrom json import JSONEncoder\nfrom typing import Any, Dict\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\nclass StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n return repr(o)\n\n\ndef pretty_print_graphql_operation(\n operation_name: str, query: str, variables: Dict[\"str\", Any]\n):\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No 
operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}]} | 683 | 317 |
gh_patches_debug_4565 | rasdani/github-patches | git_diff | pypa__setuptools-1591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Impossible to install packages with setuptools v.40.6.x, if six isn't installed
Upgrade pip and setuptools to latest versions
```
oleg$ pip install --upgrade pip setuptools
Collecting pip
Using cached https://files.pythonhosted.org/packages/c2/d7/90f34cb0d83a6c5631cf71dfe64cc1054598c843a92b400e55675cc2ac37/pip-18.1-py2.py3-none-any.whl
Collecting setuptools
Using cached https://files.pythonhosted.org/packages/4b/47/1417da90ed6f4c88465d08ea2461ff41c94cc6cc223f333d130d7a99199a/setuptools-40.6.1-py2.py3-none-any.whl
Installing collected packages: pip, setuptools
Found existing installation: pip 9.0.1
Uninstalling pip-9.0.1:
Successfully uninstalled pip-9.0.1
Found existing installation: setuptools 38.2.4
Uninstalling setuptools-38.2.4:
Successfully uninstalled setuptools-38.2.4
Successfully installed pip-18.1 setuptools-40.6.1
```
Try to install any package, d2to1 for example
```
oleg$ pip install d2to1
Collecting d2to1
Downloading https://files.pythonhosted.org/packages/dc/bd/eac45e4e77d76f6c0ae539819c40f1babb891d7855129663e37957a7c2df/d2to1-0.2.12.post1.tar.gz
Complete output from command python setup.py egg_info:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/private/var/folders/ns/71p61z5s3hsd2pv327gmdh1c0000gn/T/pip-install-2J_LhF/d2to1/setup.py", line 17, in <module>
setup(**cfg_to_args())
File "d2to1/util.py", line 204, in cfg_to_args
wrap_commands(kwargs)
File "d2to1/util.py", line 439, in wrap_commands
for cmd, _ in dist.get_command_list():
File "/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/dist.py", line 724, in get_command_list
cmdclass = ep.resolve()
File "/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/pkg_resources/__init__.py", line 2352, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/command/upload_docs.py", line 23, in <module>
from .upload import upload
File "/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/command/upload.py", line 15, in <module>
from six.moves.urllib.request import urlopen, Request
ImportError: No module named six.moves.urllib.request
----------------------------------------
Command "python setup.py egg_info" failed with error code 1 in /private/var/folders/ns/71p61z5s3hsd2pv327gmdh1c0000gn/T/pip-install-2J_LhF/d2to1/
````
```
oleg$ pip list
Package Version
---------- ------------
d2to1 0.2.12.post1
pip 18.1
setuptools 40.6.1
wheel 0.30.0
```
Downgrade setuptools to v.40.5.0, and try installation again
```
oleg$ pip install --upgrade setuptools==40.5.0
Collecting setuptools==40.5.0
Downloading https://files.pythonhosted.org/packages/82/a1/ba6fb41367b375f5cb653d1317d8ca263c636cff6566e2da1b0da716069d/setuptools-40.5.0-py2.py3-none-any.whl (569kB)
100% |████████████████████████████████| 573kB 754kB/s
Installing collected packages: setuptools
Found existing installation: setuptools 40.6.1
Uninstalling setuptools-40.6.1:
Successfully uninstalled setuptools-40.6.1
Successfully installed setuptools-40.5.0
oleg$ pip install d2to1
Collecting d2to1
Using cached https://files.pythonhosted.org/packages/dc/bd/eac45e4e77d76f6c0ae539819c40f1babb891d7855129663e37957a7c2df/d2to1-0.2.12.post1.tar.gz
Requirement already satisfied: setuptools in /Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages (from d2to1) (40.5.0)
Building wheels for collected packages: d2to1
Running setup.py bdist_wheel for d2to1 ... done
Stored in directory: /Users/oleg/Library/Caches/pip/wheels/e6/1a/ed/11531583d510d72448e39bfc254147d0e7b2b2ad65722b3a6f
Successfully built d2to1
Installing collected packages: d2to1
Successfully installed d2to1-0.2.12.post1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/command/upload.py`
Content:
```
1 import io
2 import os
3 import hashlib
4 import getpass
5 import platform
6
7 from base64 import standard_b64encode
8
9 from distutils import log
10 from distutils.command import upload as orig
11 from distutils.spawn import spawn
12
13 from distutils.errors import DistutilsError
14
15 from six.moves.urllib.request import urlopen, Request
16 from six.moves.urllib.error import HTTPError
17 from six.moves.urllib.parse import urlparse
18
19 class upload(orig.upload):
20 """
21 Override default upload behavior to obtain password
22 in a variety of different ways.
23 """
24 def run(self):
25 try:
26 orig.upload.run(self)
27 finally:
28 self.announce(
29 "WARNING: Uploading via this command is deprecated, use twine "
30 "to upload instead (https://pypi.org/p/twine/)",
31 log.WARN
32 )
33
34 def finalize_options(self):
35 orig.upload.finalize_options(self)
36 self.username = (
37 self.username or
38 getpass.getuser()
39 )
40 # Attempt to obtain password. Short circuit evaluation at the first
41 # sign of success.
42 self.password = (
43 self.password or
44 self._load_password_from_keyring() or
45 self._prompt_for_password()
46 )
47
48 def upload_file(self, command, pyversion, filename):
49 # Makes sure the repository URL is compliant
50 schema, netloc, url, params, query, fragments = \
51 urlparse(self.repository)
52 if params or query or fragments:
53 raise AssertionError("Incompatible url %s" % self.repository)
54
55 if schema not in ('http', 'https'):
56 raise AssertionError("unsupported schema " + schema)
57
58 # Sign if requested
59 if self.sign:
60 gpg_args = ["gpg", "--detach-sign", "-a", filename]
61 if self.identity:
62 gpg_args[2:2] = ["--local-user", self.identity]
63 spawn(gpg_args,
64 dry_run=self.dry_run)
65
66 # Fill in the data - send all the meta-data in case we need to
67 # register a new release
68 with open(filename, 'rb') as f:
69 content = f.read()
70
71 meta = self.distribution.metadata
72
73 data = {
74 # action
75 ':action': 'file_upload',
76 'protocol_version': '1',
77
78 # identify release
79 'name': meta.get_name(),
80 'version': meta.get_version(),
81
82 # file content
83 'content': (os.path.basename(filename),content),
84 'filetype': command,
85 'pyversion': pyversion,
86 'md5_digest': hashlib.md5(content).hexdigest(),
87
88 # additional meta-data
89 'metadata_version': str(meta.get_metadata_version()),
90 'summary': meta.get_description(),
91 'home_page': meta.get_url(),
92 'author': meta.get_contact(),
93 'author_email': meta.get_contact_email(),
94 'license': meta.get_licence(),
95 'description': meta.get_long_description(),
96 'keywords': meta.get_keywords(),
97 'platform': meta.get_platforms(),
98 'classifiers': meta.get_classifiers(),
99 'download_url': meta.get_download_url(),
100 # PEP 314
101 'provides': meta.get_provides(),
102 'requires': meta.get_requires(),
103 'obsoletes': meta.get_obsoletes(),
104 }
105
106 data['comment'] = ''
107
108 if self.sign:
109 data['gpg_signature'] = (os.path.basename(filename) + ".asc",
110 open(filename+".asc", "rb").read())
111
112 # set up the authentication
113 user_pass = (self.username + ":" + self.password).encode('ascii')
114 # The exact encoding of the authentication string is debated.
115 # Anyway PyPI only accepts ascii for both username or password.
116 auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
117
118 # Build up the MIME payload for the POST data
119 boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
120 sep_boundary = b'\r\n--' + boundary.encode('ascii')
121 end_boundary = sep_boundary + b'--\r\n'
122 body = io.BytesIO()
123 for key, value in data.items():
124 title = '\r\nContent-Disposition: form-data; name="%s"' % key
125 # handle multiple entries for the same name
126 if not isinstance(value, list):
127 value = [value]
128 for value in value:
129 if type(value) is tuple:
130 title += '; filename="%s"' % value[0]
131 value = value[1]
132 else:
133 value = str(value).encode('utf-8')
134 body.write(sep_boundary)
135 body.write(title.encode('utf-8'))
136 body.write(b"\r\n\r\n")
137 body.write(value)
138 body.write(end_boundary)
139 body = body.getvalue()
140
141 msg = "Submitting %s to %s" % (filename, self.repository)
142 self.announce(msg, log.INFO)
143
144 # build the Request
145 headers = {
146 'Content-type': 'multipart/form-data; boundary=%s' % boundary,
147 'Content-length': str(len(body)),
148 'Authorization': auth,
149 }
150
151 request = Request(self.repository, data=body,
152 headers=headers)
153 # send the data
154 try:
155 result = urlopen(request)
156 status = result.getcode()
157 reason = result.msg
158 except HTTPError as e:
159 status = e.code
160 reason = e.msg
161 except OSError as e:
162 self.announce(str(e), log.ERROR)
163 raise
164
165 if status == 200:
166 self.announce('Server response (%s): %s' % (status, reason),
167 log.INFO)
168 if self.show_response:
169 text = getattr(self, '_read_pypi_response',
170 lambda x: None)(result)
171 if text is not None:
172 msg = '\n'.join(('-' * 75, text, '-' * 75))
173 self.announce(msg, log.INFO)
174 else:
175 msg = 'Upload failed (%s): %s' % (status, reason)
176 self.announce(msg, log.ERROR)
177 raise DistutilsError(msg)
178
179 def _load_password_from_keyring(self):
180 """
181 Attempt to load password from keyring. Suppress Exceptions.
182 """
183 try:
184 keyring = __import__('keyring')
185 return keyring.get_password(self.repository, self.username)
186 except Exception:
187 pass
188
189 def _prompt_for_password(self):
190 """
191 Prompt for a password on the tty. Suppress Exceptions.
192 """
193 try:
194 return getpass.getpass()
195 except (Exception, KeyboardInterrupt):
196 pass
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/command/upload.py b/setuptools/command/upload.py
--- a/setuptools/command/upload.py
+++ b/setuptools/command/upload.py
@@ -12,9 +12,9 @@
from distutils.errors import DistutilsError
-from six.moves.urllib.request import urlopen, Request
-from six.moves.urllib.error import HTTPError
-from six.moves.urllib.parse import urlparse
+from setuptools.extern.six.moves.urllib.request import urlopen, Request
+from setuptools.extern.six.moves.urllib.error import HTTPError
+from setuptools.extern.six.moves.urllib.parse import urlparse
class upload(orig.upload):
"""
| {"golden_diff": "diff --git a/setuptools/command/upload.py b/setuptools/command/upload.py\n--- a/setuptools/command/upload.py\n+++ b/setuptools/command/upload.py\n@@ -12,9 +12,9 @@\n \n from distutils.errors import DistutilsError\n \n-from six.moves.urllib.request import urlopen, Request\n-from six.moves.urllib.error import HTTPError\n-from six.moves.urllib.parse import urlparse\n+from setuptools.extern.six.moves.urllib.request import urlopen, Request\n+from setuptools.extern.six.moves.urllib.error import HTTPError\n+from setuptools.extern.six.moves.urllib.parse import urlparse\n \n class upload(orig.upload):\n \"\"\"\n", "issue": "Impossible to install packages with setuptools v.40.6.x, if six isn't installed\nUpgrade pip and setuptools to latest versions\r\n\r\n```\r\noleg$ pip install --upgrade pip setuptools\r\nCollecting pip\r\n Using cached https://files.pythonhosted.org/packages/c2/d7/90f34cb0d83a6c5631cf71dfe64cc1054598c843a92b400e55675cc2ac37/pip-18.1-py2.py3-none-any.whl\r\nCollecting setuptools\r\n Using cached https://files.pythonhosted.org/packages/4b/47/1417da90ed6f4c88465d08ea2461ff41c94cc6cc223f333d130d7a99199a/setuptools-40.6.1-py2.py3-none-any.whl\r\nInstalling collected packages: pip, setuptools\r\n Found existing installation: pip 9.0.1\r\n Uninstalling pip-9.0.1:\r\n Successfully uninstalled pip-9.0.1\r\n Found existing installation: setuptools 38.2.4\r\n Uninstalling setuptools-38.2.4:\r\n Successfully uninstalled setuptools-38.2.4\r\nSuccessfully installed pip-18.1 setuptools-40.6.1\r\n```\r\n\r\nTry to install any package, d2to1 for example\r\n\r\n```\r\noleg$ pip install d2to1\r\nCollecting d2to1\r\n Downloading https://files.pythonhosted.org/packages/dc/bd/eac45e4e77d76f6c0ae539819c40f1babb891d7855129663e37957a7c2df/d2to1-0.2.12.post1.tar.gz\r\n Complete output from command python setup.py egg_info:\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/private/var/folders/ns/71p61z5s3hsd2pv327gmdh1c0000gn/T/pip-install-2J_LhF/d2to1/setup.py\", line 17, in <module>\r\n setup(**cfg_to_args())\r\n File \"d2to1/util.py\", line 204, in cfg_to_args\r\n wrap_commands(kwargs)\r\n File \"d2to1/util.py\", line 439, in wrap_commands\r\n for cmd, _ in dist.get_command_list():\r\n File \"/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/dist.py\", line 724, in get_command_list\r\n cmdclass = ep.resolve()\r\n File \"/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/pkg_resources/__init__.py\", line 2352, in resolve\r\n module = __import__(self.module_name, fromlist=['__name__'], level=0)\r\n File \"/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/command/upload_docs.py\", line 23, in <module>\r\n from .upload import upload\r\n File \"/Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages/setuptools/command/upload.py\", line 15, in <module>\r\n from six.moves.urllib.request import urlopen, Request\r\n ImportError: No module named six.moves.urllib.request\r\n \r\n ----------------------------------------\r\nCommand \"python setup.py egg_info\" failed with error code 1 in /private/var/folders/ns/71p61z5s3hsd2pv327gmdh1c0000gn/T/pip-install-2J_LhF/d2to1/\r\n````\r\n\r\n```\r\noleg$ pip list\r\nPackage Version \r\n---------- ------------\r\nd2to1 0.2.12.post1\r\npip 18.1 \r\nsetuptools 40.6.1 \r\nwheel 0.30.0\r\n```\r\n\r\nDowngrade setuptools to v.40.5.0, and try installation again\r\n\r\n```\r\noleg$ pip install --upgrade setuptools==40.5.0\r\nCollecting setuptools==40.5.0\r\n 
Downloading https://files.pythonhosted.org/packages/82/a1/ba6fb41367b375f5cb653d1317d8ca263c636cff6566e2da1b0da716069d/setuptools-40.5.0-py2.py3-none-any.whl (569kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 573kB 754kB/s \r\nInstalling collected packages: setuptools\r\n Found existing installation: setuptools 40.6.1\r\n Uninstalling setuptools-40.6.1:\r\n Successfully uninstalled setuptools-40.6.1\r\nSuccessfully installed setuptools-40.5.0\r\n\r\noleg$ pip install d2to1\r\nCollecting d2to1\r\n Using cached https://files.pythonhosted.org/packages/dc/bd/eac45e4e77d76f6c0ae539819c40f1babb891d7855129663e37957a7c2df/d2to1-0.2.12.post1.tar.gz\r\nRequirement already satisfied: setuptools in /Users/oleg/.virtualenvs/yandex/lib/python2.7/site-packages (from d2to1) (40.5.0)\r\nBuilding wheels for collected packages: d2to1\r\n Running setup.py bdist_wheel for d2to1 ... done\r\n Stored in directory: /Users/oleg/Library/Caches/pip/wheels/e6/1a/ed/11531583d510d72448e39bfc254147d0e7b2b2ad65722b3a6f\r\nSuccessfully built d2to1\r\nInstalling collected packages: d2to1\r\nSuccessfully installed d2to1-0.2.12.post1\r\n```\n", "before_files": [{"content": "import io\nimport os\nimport hashlib\nimport getpass\nimport platform\n\nfrom base64 import standard_b64encode\n\nfrom distutils import log\nfrom distutils.command import upload as orig\nfrom distutils.spawn import spawn\n\nfrom distutils.errors import DistutilsError\n\nfrom six.moves.urllib.request import urlopen, Request\nfrom six.moves.urllib.error import HTTPError\nfrom six.moves.urllib.parse import urlparse\n\nclass upload(orig.upload):\n \"\"\"\n Override default upload behavior to obtain password\n in a variety of different ways.\n \"\"\"\n def run(self):\n try:\n orig.upload.run(self)\n finally:\n self.announce(\n \"WARNING: Uploading via this command is deprecated, use twine \"\n \"to upload instead (https://pypi.org/p/twine/)\",\n log.WARN\n )\n\n def finalize_options(self):\n orig.upload.finalize_options(self)\n self.username = (\n self.username or\n getpass.getuser()\n )\n # Attempt to obtain password. 
Short circuit evaluation at the first\n # sign of success.\n self.password = (\n self.password or\n self._load_password_from_keyring() or\n self._prompt_for_password()\n )\n\n def upload_file(self, command, pyversion, filename):\n # Makes sure the repository URL is compliant\n schema, netloc, url, params, query, fragments = \\\n urlparse(self.repository)\n if params or query or fragments:\n raise AssertionError(\"Incompatible url %s\" % self.repository)\n\n if schema not in ('http', 'https'):\n raise AssertionError(\"unsupported schema \" + schema)\n\n # Sign if requested\n if self.sign:\n gpg_args = [\"gpg\", \"--detach-sign\", \"-a\", filename]\n if self.identity:\n gpg_args[2:2] = [\"--local-user\", self.identity]\n spawn(gpg_args,\n dry_run=self.dry_run)\n\n # Fill in the data - send all the meta-data in case we need to\n # register a new release\n with open(filename, 'rb') as f:\n content = f.read()\n\n meta = self.distribution.metadata\n\n data = {\n # action\n ':action': 'file_upload',\n 'protocol_version': '1',\n\n # identify release\n 'name': meta.get_name(),\n 'version': meta.get_version(),\n\n # file content\n 'content': (os.path.basename(filename),content),\n 'filetype': command,\n 'pyversion': pyversion,\n 'md5_digest': hashlib.md5(content).hexdigest(),\n\n # additional meta-data\n 'metadata_version': str(meta.get_metadata_version()),\n 'summary': meta.get_description(),\n 'home_page': meta.get_url(),\n 'author': meta.get_contact(),\n 'author_email': meta.get_contact_email(),\n 'license': meta.get_licence(),\n 'description': meta.get_long_description(),\n 'keywords': meta.get_keywords(),\n 'platform': meta.get_platforms(),\n 'classifiers': meta.get_classifiers(),\n 'download_url': meta.get_download_url(),\n # PEP 314\n 'provides': meta.get_provides(),\n 'requires': meta.get_requires(),\n 'obsoletes': meta.get_obsoletes(),\n }\n\n data['comment'] = ''\n\n if self.sign:\n data['gpg_signature'] = (os.path.basename(filename) + \".asc\",\n open(filename+\".asc\", \"rb\").read())\n\n # set up the authentication\n user_pass = (self.username + \":\" + self.password).encode('ascii')\n # The exact encoding of the authentication string is debated.\n # Anyway PyPI only accepts ascii for both username or password.\n auth = \"Basic \" + standard_b64encode(user_pass).decode('ascii')\n\n # Build up the MIME payload for the POST data\n boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'\n sep_boundary = b'\\r\\n--' + boundary.encode('ascii')\n end_boundary = sep_boundary + b'--\\r\\n'\n body = io.BytesIO()\n for key, value in data.items():\n title = '\\r\\nContent-Disposition: form-data; name=\"%s\"' % key\n # handle multiple entries for the same name\n if not isinstance(value, list):\n value = [value]\n for value in value:\n if type(value) is tuple:\n title += '; filename=\"%s\"' % value[0]\n value = value[1]\n else:\n value = str(value).encode('utf-8')\n body.write(sep_boundary)\n body.write(title.encode('utf-8'))\n body.write(b\"\\r\\n\\r\\n\")\n body.write(value)\n body.write(end_boundary)\n body = body.getvalue()\n\n msg = \"Submitting %s to %s\" % (filename, self.repository)\n self.announce(msg, log.INFO)\n\n # build the Request\n headers = {\n 'Content-type': 'multipart/form-data; boundary=%s' % boundary,\n 'Content-length': str(len(body)),\n 'Authorization': auth,\n }\n\n request = Request(self.repository, data=body,\n headers=headers)\n # send the data\n try:\n result = urlopen(request)\n status = result.getcode()\n reason = result.msg\n except HTTPError as e:\n status = 
e.code\n reason = e.msg\n except OSError as e:\n self.announce(str(e), log.ERROR)\n raise\n\n if status == 200:\n self.announce('Server response (%s): %s' % (status, reason),\n log.INFO)\n if self.show_response:\n text = getattr(self, '_read_pypi_response',\n lambda x: None)(result)\n if text is not None:\n msg = '\\n'.join(('-' * 75, text, '-' * 75))\n self.announce(msg, log.INFO)\n else:\n msg = 'Upload failed (%s): %s' % (status, reason)\n self.announce(msg, log.ERROR)\n raise DistutilsError(msg)\n\n def _load_password_from_keyring(self):\n \"\"\"\n Attempt to load password from keyring. Suppress Exceptions.\n \"\"\"\n try:\n keyring = __import__('keyring')\n return keyring.get_password(self.repository, self.username)\n except Exception:\n pass\n\n def _prompt_for_password(self):\n \"\"\"\n Prompt for a password on the tty. Suppress Exceptions.\n \"\"\"\n try:\n return getpass.getpass()\n except (Exception, KeyboardInterrupt):\n pass\n", "path": "setuptools/command/upload.py"}], "after_files": [{"content": "import io\nimport os\nimport hashlib\nimport getpass\nimport platform\n\nfrom base64 import standard_b64encode\n\nfrom distutils import log\nfrom distutils.command import upload as orig\nfrom distutils.spawn import spawn\n\nfrom distutils.errors import DistutilsError\n\nfrom setuptools.extern.six.moves.urllib.request import urlopen, Request\nfrom setuptools.extern.six.moves.urllib.error import HTTPError\nfrom setuptools.extern.six.moves.urllib.parse import urlparse\n\nclass upload(orig.upload):\n \"\"\"\n Override default upload behavior to obtain password\n in a variety of different ways.\n \"\"\"\n def run(self):\n try:\n orig.upload.run(self)\n finally:\n self.announce(\n \"WARNING: Uploading via this command is deprecated, use twine \"\n \"to upload instead (https://pypi.org/p/twine/)\",\n log.WARN\n )\n\n def finalize_options(self):\n orig.upload.finalize_options(self)\n self.username = (\n self.username or\n getpass.getuser()\n )\n # Attempt to obtain password. 
Short circuit evaluation at the first\n # sign of success.\n self.password = (\n self.password or\n self._load_password_from_keyring() or\n self._prompt_for_password()\n )\n\n def upload_file(self, command, pyversion, filename):\n # Makes sure the repository URL is compliant\n schema, netloc, url, params, query, fragments = \\\n urlparse(self.repository)\n if params or query or fragments:\n raise AssertionError(\"Incompatible url %s\" % self.repository)\n\n if schema not in ('http', 'https'):\n raise AssertionError(\"unsupported schema \" + schema)\n\n # Sign if requested\n if self.sign:\n gpg_args = [\"gpg\", \"--detach-sign\", \"-a\", filename]\n if self.identity:\n gpg_args[2:2] = [\"--local-user\", self.identity]\n spawn(gpg_args,\n dry_run=self.dry_run)\n\n # Fill in the data - send all the meta-data in case we need to\n # register a new release\n with open(filename, 'rb') as f:\n content = f.read()\n\n meta = self.distribution.metadata\n\n data = {\n # action\n ':action': 'file_upload',\n 'protocol_version': '1',\n\n # identify release\n 'name': meta.get_name(),\n 'version': meta.get_version(),\n\n # file content\n 'content': (os.path.basename(filename),content),\n 'filetype': command,\n 'pyversion': pyversion,\n 'md5_digest': hashlib.md5(content).hexdigest(),\n\n # additional meta-data\n 'metadata_version': str(meta.get_metadata_version()),\n 'summary': meta.get_description(),\n 'home_page': meta.get_url(),\n 'author': meta.get_contact(),\n 'author_email': meta.get_contact_email(),\n 'license': meta.get_licence(),\n 'description': meta.get_long_description(),\n 'keywords': meta.get_keywords(),\n 'platform': meta.get_platforms(),\n 'classifiers': meta.get_classifiers(),\n 'download_url': meta.get_download_url(),\n # PEP 314\n 'provides': meta.get_provides(),\n 'requires': meta.get_requires(),\n 'obsoletes': meta.get_obsoletes(),\n }\n\n data['comment'] = ''\n\n if self.sign:\n data['gpg_signature'] = (os.path.basename(filename) + \".asc\",\n open(filename+\".asc\", \"rb\").read())\n\n # set up the authentication\n user_pass = (self.username + \":\" + self.password).encode('ascii')\n # The exact encoding of the authentication string is debated.\n # Anyway PyPI only accepts ascii for both username or password.\n auth = \"Basic \" + standard_b64encode(user_pass).decode('ascii')\n\n # Build up the MIME payload for the POST data\n boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'\n sep_boundary = b'\\r\\n--' + boundary.encode('ascii')\n end_boundary = sep_boundary + b'--\\r\\n'\n body = io.BytesIO()\n for key, value in data.items():\n title = '\\r\\nContent-Disposition: form-data; name=\"%s\"' % key\n # handle multiple entries for the same name\n if not isinstance(value, list):\n value = [value]\n for value in value:\n if type(value) is tuple:\n title += '; filename=\"%s\"' % value[0]\n value = value[1]\n else:\n value = str(value).encode('utf-8')\n body.write(sep_boundary)\n body.write(title.encode('utf-8'))\n body.write(b\"\\r\\n\\r\\n\")\n body.write(value)\n body.write(end_boundary)\n body = body.getvalue()\n\n msg = \"Submitting %s to %s\" % (filename, self.repository)\n self.announce(msg, log.INFO)\n\n # build the Request\n headers = {\n 'Content-type': 'multipart/form-data; boundary=%s' % boundary,\n 'Content-length': str(len(body)),\n 'Authorization': auth,\n }\n\n request = Request(self.repository, data=body,\n headers=headers)\n # send the data\n try:\n result = urlopen(request)\n status = result.getcode()\n reason = result.msg\n except HTTPError as e:\n status = 
e.code\n reason = e.msg\n except OSError as e:\n self.announce(str(e), log.ERROR)\n raise\n\n if status == 200:\n self.announce('Server response (%s): %s' % (status, reason),\n log.INFO)\n if self.show_response:\n text = getattr(self, '_read_pypi_response',\n lambda x: None)(result)\n if text is not None:\n msg = '\\n'.join(('-' * 75, text, '-' * 75))\n self.announce(msg, log.INFO)\n else:\n msg = 'Upload failed (%s): %s' % (status, reason)\n self.announce(msg, log.ERROR)\n raise DistutilsError(msg)\n\n def _load_password_from_keyring(self):\n \"\"\"\n Attempt to load password from keyring. Suppress Exceptions.\n \"\"\"\n try:\n keyring = __import__('keyring')\n return keyring.get_password(self.repository, self.username)\n except Exception:\n pass\n\n def _prompt_for_password(self):\n \"\"\"\n Prompt for a password on the tty. Suppress Exceptions.\n \"\"\"\n try:\n return getpass.getpass()\n except (Exception, KeyboardInterrupt):\n pass\n", "path": "setuptools/command/upload.py"}]} | 3,633 | 135 |
gh_patches_debug_33721 | rasdani/github-patches | git_diff | docker__docker-py-1178 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support create network EnableIPv6 and Labels options
Check the remote API:
https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-network
There are two missing JSON parameters:
```
EnableIPv6 - Enable IPv6 on the network
Labels - Labels to set on the network, specified as a map: {"key":"value" [,"key2":"value2"]}
```
--- END ISSUE ---
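As an aside, here is a hedged sketch of the call this issue is asking for. The `enable_ipv6` and `labels` keyword names mirror the remote API's `EnableIPv6` and `Labels` fields and are assumptions until a patch like the one below is applied; `docker.APIClient` is the low-level client name in docker-py 2.x and later (older releases expose it as `docker.Client`).
```python
import docker

# Low-level API client; adjust base_url/version for your daemon.
cli = docker.APIClient(base_url='unix://var/run/docker.sock', version='1.23')

net = cli.create_network(
    'isolated_net',
    driver='bridge',
    enable_ipv6=True,                           # -> "EnableIPv6" in the create payload
    labels={'key': 'value', 'key2': 'value2'},  # -> "Labels"
    check_duplicate=True,
)
```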
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/network.py`
Content:
```
1 import json
2
3 from ..errors import InvalidVersion
4 from ..utils import check_resource, minimum_version
5 from ..utils import version_lt
6
7
8 class NetworkApiMixin(object):
9 @minimum_version('1.21')
10 def networks(self, names=None, ids=None):
11 filters = {}
12 if names:
13 filters['name'] = names
14 if ids:
15 filters['id'] = ids
16
17 params = {'filters': json.dumps(filters)}
18
19 url = self._url("/networks")
20 res = self._get(url, params=params)
21 return self._result(res, json=True)
22
23 @minimum_version('1.21')
24 def create_network(self, name, driver=None, options=None, ipam=None,
25 check_duplicate=None, internal=False):
26 if options is not None and not isinstance(options, dict):
27 raise TypeError('options must be a dictionary')
28
29 data = {
30 'Name': name,
31 'Driver': driver,
32 'Options': options,
33 'IPAM': ipam,
34 'CheckDuplicate': check_duplicate
35 }
36
37 if internal:
38 if version_lt(self._version, '1.22'):
39 raise InvalidVersion('Internal networks are not '
40 'supported in API version < 1.22')
41 data['Internal'] = True
42
43 url = self._url("/networks/create")
44 res = self._post_json(url, data=data)
45 return self._result(res, json=True)
46
47 @minimum_version('1.21')
48 def remove_network(self, net_id):
49 url = self._url("/networks/{0}", net_id)
50 res = self._delete(url)
51 self._raise_for_status(res)
52
53 @minimum_version('1.21')
54 def inspect_network(self, net_id):
55 url = self._url("/networks/{0}", net_id)
56 res = self._get(url)
57 return self._result(res, json=True)
58
59 @check_resource
60 @minimum_version('1.21')
61 def connect_container_to_network(self, container, net_id,
62 ipv4_address=None, ipv6_address=None,
63 aliases=None, links=None,
64 link_local_ips=None):
65 data = {
66 "Container": container,
67 "EndpointConfig": self.create_endpoint_config(
68 aliases=aliases, links=links, ipv4_address=ipv4_address,
69 ipv6_address=ipv6_address, link_local_ips=link_local_ips
70 ),
71 }
72
73 url = self._url("/networks/{0}/connect", net_id)
74 res = self._post_json(url, data=data)
75 self._raise_for_status(res)
76
77 @check_resource
78 @minimum_version('1.21')
79 def disconnect_container_from_network(self, container, net_id):
80 data = {"container": container}
81 url = self._url("/networks/{0}/disconnect", net_id)
82 res = self._post_json(url, data=data)
83 self._raise_for_status(res)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -22,7 +22,8 @@
@minimum_version('1.21')
def create_network(self, name, driver=None, options=None, ipam=None,
- check_duplicate=None, internal=False):
+ check_duplicate=None, internal=False, labels=None,
+ enable_ipv6=False):
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
@@ -34,6 +35,22 @@
'CheckDuplicate': check_duplicate
}
+ if labels is not None:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'network labels were introduced in API 1.23'
+ )
+ if not isinstance(labels, dict):
+ raise TypeError('labels must be a dictionary')
+ data["Labels"] = labels
+
+ if enable_ipv6:
+ if version_lt(self._version, '1.23'):
+ raise InvalidVersion(
+ 'enable_ipv6 was introduced in API 1.23'
+ )
+ data['EnableIPv6'] = True
+
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
@@ -76,8 +93,15 @@
@check_resource
@minimum_version('1.21')
- def disconnect_container_from_network(self, container, net_id):
- data = {"container": container}
+ def disconnect_container_from_network(self, container, net_id,
+ force=False):
+ data = {"Container": container}
+ if force:
+ if version_lt(self._version, '1.22'):
+ raise InvalidVersion(
+ 'Forced disconnect was introduced in API 1.22'
+ )
+ data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
| {"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -22,7 +22,8 @@\n \n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n- check_duplicate=None, internal=False):\n+ check_duplicate=None, internal=False, labels=None,\n+ enable_ipv6=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n \n@@ -34,6 +35,22 @@\n 'CheckDuplicate': check_duplicate\n }\n \n+ if labels is not None:\n+ if version_lt(self._version, '1.23'):\n+ raise InvalidVersion(\n+ 'network labels were introduced in API 1.23'\n+ )\n+ if not isinstance(labels, dict):\n+ raise TypeError('labels must be a dictionary')\n+ data[\"Labels\"] = labels\n+\n+ if enable_ipv6:\n+ if version_lt(self._version, '1.23'):\n+ raise InvalidVersion(\n+ 'enable_ipv6 was introduced in API 1.23'\n+ )\n+ data['EnableIPv6'] = True\n+\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n@@ -76,8 +93,15 @@\n \n @check_resource\n @minimum_version('1.21')\n- def disconnect_container_from_network(self, container, net_id):\n- data = {\"container\": container}\n+ def disconnect_container_from_network(self, container, net_id,\n+ force=False):\n+ data = {\"Container\": container}\n+ if force:\n+ if version_lt(self._version, '1.22'):\n+ raise InvalidVersion(\n+ 'Forced disconnect was introduced in API 1.22'\n+ )\n+ data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "issue": "Support create network EnableIPv6 and Labels options \nCheck the remote API:\nhttps://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-network\n\nThere are two missing JSON parameters:\n\n```\nEnableIPv6 - Enable IPv6 on the network\nLabels - Labels to set on the network, specified as a map: {\"key\":\"value\" [,\"key2\":\"value2\"]}\n```\n\n", "before_files": [{"content": "import json\n\nfrom ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate\n }\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return 
self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}], "after_files": [{"content": "import json\n\nfrom ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False, labels=None,\n enable_ipv6=False):\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate\n }\n\n if labels is not None:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'network labels were introduced in API 1.23'\n )\n if not isinstance(labels, dict):\n raise TypeError('labels must be a dictionary')\n data[\"Labels\"] = labels\n\n if enable_ipv6:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'enable_ipv6 was introduced in API 1.23'\n )\n data['EnableIPv6'] = True\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n data = {\"Container\": 
container}\n if force:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion(\n 'Forced disconnect was introduced in API 1.22'\n )\n data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}]} | 1,142 | 480 |
gh_patches_debug_10767 | rasdani/github-patches | git_diff | horovod__horovod-853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'float' object has no attribute 'detach' in pytorch imagenet example.
I ran the [pytorch imagenet example](https://github.com/horovod/horovod/blob/master/examples/pytorch_imagenet_resnet50.py) but got an error that a `float` number doesn't have a `detach()` method. It seems that `loss.item()` leads to a plain `float`, but I don't know how to fix that within the `horovod` framework.
Can anyone help me? Thanks a lot!
```
mpirun -np 4 \
-H localhost:4 \
-bind-to none -map-by slot \
-x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
python main_hvd.py --train-dir /datasets/ILSVRC2012/images/train --val-dir /datasets/ILSVRC2012/images/val
```
```
Train Epoch #1: 0%| | 0/10010 [00:00<?, ?it/s]Traceback (most recent call last):
File "main_hvd.py", line 272, in <module>
train(epoch)
File "main_hvd.py", line 179, in train
train_loss.update(loss.item())
File "main_hvd.py", line 263, in update
self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)
AttributeError: 'float' object has no attribute 'detach'
```
My environment is:
* pytorch==0.4.1
* horovod==0.16.0
--- END ISSUE ---
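Before digging into the files, a minimal repro of the type mismatch behind that traceback, assuming only PyTorch itself: `loss.item()` returns a plain Python float, which has no `.detach()`, while the `Metric.update()` shown below expects a tensor.
```python
import torch

loss = torch.tensor(1.25, requires_grad=True)

val = loss.item()   # plain Python float
# val.detach()      # AttributeError: 'float' object has no attribute 'detach'

# Handing over the tensor itself (or re-wrapping the value) avoids the error:
loss.detach().cpu()         # works: still a tensor
torch.tensor(loss.item())   # also a tensor, if only the scalar value is needed
```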
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch_imagenet_resnet50.py`
Content:
```
1 from __future__ import print_function
2
3 import torch
4 import argparse
5 import torch.backends.cudnn as cudnn
6 import torch.nn.functional as F
7 import torch.optim as optim
8 import torch.utils.data.distributed
9 from torchvision import datasets, transforms, models
10 import horovod.torch as hvd
11 import tensorboardX
12 import os
13 import math
14 from tqdm import tqdm
15
16 # Training settings
17 parser = argparse.ArgumentParser(description='PyTorch ImageNet Example',
18 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
19 parser.add_argument('--train-dir', default=os.path.expanduser('~/imagenet/train'),
20 help='path to training data')
21 parser.add_argument('--val-dir', default=os.path.expanduser('~/imagenet/validation'),
22 help='path to validation data')
23 parser.add_argument('--log-dir', default='./logs',
24 help='tensorboard log directory')
25 parser.add_argument('--checkpoint-format', default='./checkpoint-{epoch}.pth.tar',
26 help='checkpoint file format')
27 parser.add_argument('--fp16-allreduce', action='store_true', default=False,
28 help='use fp16 compression during allreduce')
29 parser.add_argument('--batches-per-allreduce', type=int, default=1,
30 help='number of batches processed locally before '
31 'executing allreduce across workers; it multiplies '
32 'total batch size.')
33
34 # Default settings from https://arxiv.org/abs/1706.02677.
35 parser.add_argument('--batch-size', type=int, default=32,
36 help='input batch size for training')
37 parser.add_argument('--val-batch-size', type=int, default=32,
38 help='input batch size for validation')
39 parser.add_argument('--epochs', type=int, default=90,
40 help='number of epochs to train')
41 parser.add_argument('--base-lr', type=float, default=0.0125,
42 help='learning rate for a single GPU')
43 parser.add_argument('--warmup-epochs', type=float, default=5,
44 help='number of warmup epochs')
45 parser.add_argument('--momentum', type=float, default=0.9,
46 help='SGD momentum')
47 parser.add_argument('--wd', type=float, default=0.00005,
48 help='weight decay')
49
50 parser.add_argument('--no-cuda', action='store_true', default=False,
51 help='disables CUDA training')
52 parser.add_argument('--seed', type=int, default=42,
53 help='random seed')
54
55 args = parser.parse_args()
56 args.cuda = not args.no_cuda and torch.cuda.is_available()
57
58 allreduce_batch_size = args.batch_size * args.batches_per_allreduce
59
60 hvd.init()
61 torch.manual_seed(args.seed)
62
63 if args.cuda:
64 # Horovod: pin GPU to local rank.
65 torch.cuda.set_device(hvd.local_rank())
66 torch.cuda.manual_seed(args.seed)
67
68 cudnn.benchmark = True
69
70 # If set > 0, will resume training from a given checkpoint.
71 resume_from_epoch = 0
72 for try_epoch in range(args.epochs, 0, -1):
73 if os.path.exists(args.checkpoint_format.format(epoch=try_epoch)):
74 resume_from_epoch = try_epoch
75 break
76
77 # Horovod: broadcast resume_from_epoch from rank 0 (which will have
78 # checkpoints) to other ranks.
79 resume_from_epoch = hvd.broadcast(torch.tensor(resume_from_epoch), root_rank=0,
80 name='resume_from_epoch').item()
81
82 # Horovod: print logs on the first worker.
83 verbose = 1 if hvd.rank() == 0 else 0
84
85 # Horovod: write TensorBoard logs on first worker.
86 log_writer = tensorboardX.SummaryWriter(args.log_dir) if hvd.rank() == 0 else None
87
88
89 kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
90 train_dataset = \
91 datasets.ImageFolder(args.train_dir,
92 transform=transforms.Compose([
93 transforms.RandomResizedCrop(224),
94 transforms.RandomHorizontalFlip(),
95 transforms.ToTensor(),
96 transforms.Normalize(mean=[0.485, 0.456, 0.406],
97 std=[0.229, 0.224, 0.225])
98 ]))
99 # Horovod: use DistributedSampler to partition data among workers. Manually specify
100 # `num_replicas=hvd.size()` and `rank=hvd.rank()`.
101 train_sampler = torch.utils.data.distributed.DistributedSampler(
102 train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
103 train_loader = torch.utils.data.DataLoader(
104 train_dataset, batch_size=allreduce_batch_size,
105 sampler=train_sampler, **kwargs)
106
107 val_dataset = \
108 datasets.ImageFolder(args.val_dir,
109 transform=transforms.Compose([
110 transforms.Resize(256),
111 transforms.CenterCrop(224),
112 transforms.ToTensor(),
113 transforms.Normalize(mean=[0.485, 0.456, 0.406],
114 std=[0.229, 0.224, 0.225])
115 ]))
116 val_sampler = torch.utils.data.distributed.DistributedSampler(
117 val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
118 val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size,
119 sampler=val_sampler, **kwargs)
120
121
122 # Set up standard ResNet-50 model.
123 model = models.resnet50()
124
125 if args.cuda:
126 # Move model to GPU.
127 model.cuda()
128
129 # Horovod: scale learning rate by the number of GPUs.
130 # Gradient Accumulation: scale learning rate by batches_per_allreduce
131 optimizer = optim.SGD(model.parameters(),
132 lr=(args.base_lr *
133 args.batches_per_allreduce * hvd.size()),
134 momentum=args.momentum, weight_decay=args.wd)
135
136 # Horovod: (optional) compression algorithm.
137 compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
138
139 # Horovod: wrap optimizer with DistributedOptimizer.
140 optimizer = hvd.DistributedOptimizer(
141 optimizer, named_parameters=model.named_parameters(),
142 compression=compression,
143 backward_passes_per_step=args.batches_per_allreduce)
144
145 # Restore from a previous checkpoint, if initial_epoch is specified.
146 # Horovod: restore on the first worker which will broadcast weights to other workers.
147 if resume_from_epoch > 0 and hvd.rank() == 0:
148 filepath = args.checkpoint_format.format(epoch=resume_from_epoch)
149 checkpoint = torch.load(filepath)
150 model.load_state_dict(checkpoint['model'])
151 optimizer.load_state_dict(checkpoint['optimizer'])
152
153 # Horovod: broadcast parameters & optimizer state.
154 hvd.broadcast_parameters(model.state_dict(), root_rank=0)
155 hvd.broadcast_optimizer_state(optimizer, root_rank=0)
156
157 def train(epoch):
158 model.train()
159 train_sampler.set_epoch(epoch)
160 train_loss = Metric('train_loss')
161 train_accuracy = Metric('train_accuracy')
162
163 with tqdm(total=len(train_loader),
164 desc='Train Epoch #{}'.format(epoch + 1),
165 disable=not verbose) as t:
166 for batch_idx, (data, target) in enumerate(train_loader):
167 adjust_learning_rate(epoch, batch_idx)
168
169 if args.cuda:
170 data, target = data.cuda(), target.cuda()
171 optimizer.zero_grad()
172 # Split data into sub-batches of size batch_size
173 for i in range(0, len(data), args.batch_size):
174 data_batch = data[i:i + args.batch_size]
175 target_batch = target[i:i + args.batch_size]
176 output = model(data_batch)
177 train_accuracy.update(accuracy(output, target_batch))
178 loss = F.cross_entropy(output, target_batch)
179 train_loss.update(loss.item())
180 # Average gradients among sub-batches
181 loss.div_(math.ceil(float(len(data)) / args.batch_size))
182 loss.backward()
183 # Gradient is applied across all ranks
184 optimizer.step()
185 t.set_postfix({'loss': train_loss.avg.item(),
186 'accuracy': 100. * train_accuracy.avg.item()})
187 t.update(1)
188
189 if log_writer:
190 log_writer.add_scalar('train/loss', train_loss.avg, epoch)
191 log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)
192
193
194 def validate(epoch):
195 model.eval()
196 val_loss = Metric('val_loss')
197 val_accuracy = Metric('val_accuracy')
198
199 with tqdm(total=len(val_loader),
200 desc='Validate Epoch #{}'.format(epoch + 1),
201 disable=not verbose) as t:
202 with torch.no_grad():
203 for data, target in val_loader:
204 if args.cuda:
205 data, target = data.cuda(), target.cuda()
206 output = model(data)
207
208 val_loss.update(F.cross_entropy(output, target))
209 val_accuracy.update(accuracy(output, target))
210 t.set_postfix({'loss': val_loss.avg.item(),
211 'accuracy': 100. * val_accuracy.avg.item()})
212 t.update(1)
213
214 if log_writer:
215 log_writer.add_scalar('val/loss', val_loss.avg, epoch)
216 log_writer.add_scalar('val/accuracy', val_accuracy.avg, epoch)
217
218
219 # Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final
220 # accuracy. Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during
221 # the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
222 # After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.
223 def adjust_learning_rate(epoch, batch_idx):
224 if epoch < args.warmup_epochs:
225 epoch += float(batch_idx + 1) / len(train_loader)
226 lr_adj = 1. / hvd.size() * (epoch * (hvd.size() - 1) / args.warmup_epochs + 1)
227 elif epoch < 30:
228 lr_adj = 1.
229 elif epoch < 60:
230 lr_adj = 1e-1
231 elif epoch < 80:
232 lr_adj = 1e-2
233 else:
234 lr_adj = 1e-3
235 for param_group in optimizer.param_groups:
236 param_group['lr'] = args.base_lr * hvd.size() * args.batches_per_allreduce * lr_adj
237
238
239 def accuracy(output, target):
240 # get the index of the max log-probability
241 pred = output.max(1, keepdim=True)[1]
242 return pred.eq(target.view_as(pred)).cpu().float().mean()
243
244
245 def save_checkpoint(epoch):
246 if hvd.rank() == 0:
247 filepath = args.checkpoint_format.format(epoch=epoch + 1)
248 state = {
249 'model': model.state_dict(),
250 'optimizer': optimizer.state_dict(),
251 }
252 torch.save(state, filepath)
253
254
255 # Horovod: average metrics from distributed training.
256 class Metric(object):
257 def __init__(self, name):
258 self.name = name
259 self.sum = torch.tensor(0.)
260 self.n = torch.tensor(0.)
261
262 def update(self, val):
263 self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)
264 self.n += 1
265
266 @property
267 def avg(self):
268 return self.sum / self.n
269
270
271 for epoch in range(resume_from_epoch, args.epochs):
272 train(epoch)
273 validate(epoch)
274 save_checkpoint(epoch)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/pytorch_imagenet_resnet50.py b/examples/pytorch_imagenet_resnet50.py
--- a/examples/pytorch_imagenet_resnet50.py
+++ b/examples/pytorch_imagenet_resnet50.py
@@ -176,7 +176,7 @@
output = model(data_batch)
train_accuracy.update(accuracy(output, target_batch))
loss = F.cross_entropy(output, target_batch)
- train_loss.update(loss.item())
+ train_loss.update(loss)
# Average gradients among sub-batches
loss.div_(math.ceil(float(len(data)) / args.batch_size))
loss.backward()
| {"golden_diff": "diff --git a/examples/pytorch_imagenet_resnet50.py b/examples/pytorch_imagenet_resnet50.py\n--- a/examples/pytorch_imagenet_resnet50.py\n+++ b/examples/pytorch_imagenet_resnet50.py\n@@ -176,7 +176,7 @@\n output = model(data_batch)\n train_accuracy.update(accuracy(output, target_batch))\n loss = F.cross_entropy(output, target_batch)\n- train_loss.update(loss.item())\n+ train_loss.update(loss)\n # Average gradients among sub-batches\n loss.div_(math.ceil(float(len(data)) / args.batch_size))\n loss.backward()\n", "issue": "'float' object has no attribute 'detach' in pytorch imagenet example.\nI ran the [pytorch imagenet example](https://github.com/horovod/horovod/blob/master/examples/pytorch_imagenet_resnet50.py) but got an error that `float` number don't have `detach()` method. It seems that `loss.item()` lead to the `float` number, but I don't know how to fix that in `horovod` framework. \r\n\r\nCan anyone help me? Thanks a lot!\r\n\r\n```\r\nmpirun -np 4 \\\r\n -H localhost:4 \\\r\n -bind-to none -map-by slot \\\r\n -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \\\r\n -mca pml ob1 -mca btl ^openib \\\r\n python main_hvd.py --train-dir /datasets/ILSVRC2012/images/train --val-dir /datasets/ILSVRC2012/images/val\r\n```\r\n\r\n```\r\nTrain Epoch #1: 0%| | 0/10010 [00:00<?, ?it/s]Traceback (most recent call last):\r\n File \"main_hvd.py\", line 272, in <module>\r\n train(epoch)\r\n File \"main_hvd.py\", line 179, in train\r\n train_loss.update(loss.item())\r\n File \"main_hvd.py\", line 263, in update\r\n self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)\r\nAttributeError: 'float' object has no attribute 'detach'\r\n```\r\n\r\nMy environment is:\r\n* pytorch==0.4.1\r\n* horovod==0.16.0\n", "before_files": [{"content": "from __future__ import print_function\n\nimport torch\nimport argparse\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data.distributed\nfrom torchvision import datasets, transforms, models\nimport horovod.torch as hvd\nimport tensorboardX\nimport os\nimport math\nfrom tqdm import tqdm\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Example',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--train-dir', default=os.path.expanduser('~/imagenet/train'),\n help='path to training data')\nparser.add_argument('--val-dir', default=os.path.expanduser('~/imagenet/validation'),\n help='path to validation data')\nparser.add_argument('--log-dir', default='./logs',\n help='tensorboard log directory')\nparser.add_argument('--checkpoint-format', default='./checkpoint-{epoch}.pth.tar',\n help='checkpoint file format')\nparser.add_argument('--fp16-allreduce', action='store_true', default=False,\n help='use fp16 compression during allreduce')\nparser.add_argument('--batches-per-allreduce', type=int, default=1,\n help='number of batches processed locally before '\n 'executing allreduce across workers; it multiplies '\n 'total batch size.')\n\n# Default settings from https://arxiv.org/abs/1706.02677.\nparser.add_argument('--batch-size', type=int, default=32,\n help='input batch size for training')\nparser.add_argument('--val-batch-size', type=int, default=32,\n help='input batch size for validation')\nparser.add_argument('--epochs', type=int, default=90,\n help='number of epochs to train')\nparser.add_argument('--base-lr', type=float, default=0.0125,\n help='learning rate for a single GPU')\nparser.add_argument('--warmup-epochs', 
type=float, default=5,\n help='number of warmup epochs')\nparser.add_argument('--momentum', type=float, default=0.9,\n help='SGD momentum')\nparser.add_argument('--wd', type=float, default=0.00005,\n help='weight decay')\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=42,\n help='random seed')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nallreduce_batch_size = args.batch_size * args.batches_per_allreduce\n\nhvd.init()\ntorch.manual_seed(args.seed)\n\nif args.cuda:\n # Horovod: pin GPU to local rank.\n torch.cuda.set_device(hvd.local_rank())\n torch.cuda.manual_seed(args.seed)\n\ncudnn.benchmark = True\n\n# If set > 0, will resume training from a given checkpoint.\nresume_from_epoch = 0\nfor try_epoch in range(args.epochs, 0, -1):\n if os.path.exists(args.checkpoint_format.format(epoch=try_epoch)):\n resume_from_epoch = try_epoch\n break\n\n# Horovod: broadcast resume_from_epoch from rank 0 (which will have\n# checkpoints) to other ranks.\nresume_from_epoch = hvd.broadcast(torch.tensor(resume_from_epoch), root_rank=0,\n name='resume_from_epoch').item()\n\n# Horovod: print logs on the first worker.\nverbose = 1 if hvd.rank() == 0 else 0\n\n# Horovod: write TensorBoard logs on first worker.\nlog_writer = tensorboardX.SummaryWriter(args.log_dir) if hvd.rank() == 0 else None\n\n\nkwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}\ntrain_dataset = \\\n datasets.ImageFolder(args.train_dir,\n transform=transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\n# Horovod: use DistributedSampler to partition data among workers. 
Manually specify\n# `num_replicas=hvd.size()` and `rank=hvd.rank()`.\ntrain_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset, num_replicas=hvd.size(), rank=hvd.rank())\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=allreduce_batch_size,\n sampler=train_sampler, **kwargs)\n\nval_dataset = \\\n datasets.ImageFolder(args.val_dir,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\nval_sampler = torch.utils.data.distributed.DistributedSampler(\n val_dataset, num_replicas=hvd.size(), rank=hvd.rank())\nval_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size,\n sampler=val_sampler, **kwargs)\n\n\n# Set up standard ResNet-50 model.\nmodel = models.resnet50()\n\nif args.cuda:\n # Move model to GPU.\n model.cuda()\n\n# Horovod: scale learning rate by the number of GPUs.\n# Gradient Accumulation: scale learning rate by batches_per_allreduce\noptimizer = optim.SGD(model.parameters(),\n lr=(args.base_lr *\n args.batches_per_allreduce * hvd.size()),\n momentum=args.momentum, weight_decay=args.wd)\n\n# Horovod: (optional) compression algorithm.\ncompression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none\n\n# Horovod: wrap optimizer with DistributedOptimizer.\noptimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters(),\n compression=compression,\n backward_passes_per_step=args.batches_per_allreduce)\n\n# Restore from a previous checkpoint, if initial_epoch is specified.\n# Horovod: restore on the first worker which will broadcast weights to other workers.\nif resume_from_epoch > 0 and hvd.rank() == 0:\n filepath = args.checkpoint_format.format(epoch=resume_from_epoch)\n checkpoint = torch.load(filepath)\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n# Horovod: broadcast parameters & optimizer state.\nhvd.broadcast_parameters(model.state_dict(), root_rank=0)\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\ndef train(epoch):\n model.train()\n train_sampler.set_epoch(epoch)\n train_loss = Metric('train_loss')\n train_accuracy = Metric('train_accuracy')\n\n with tqdm(total=len(train_loader),\n desc='Train Epoch #{}'.format(epoch + 1),\n disable=not verbose) as t:\n for batch_idx, (data, target) in enumerate(train_loader):\n adjust_learning_rate(epoch, batch_idx)\n\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n # Split data into sub-batches of size batch_size\n for i in range(0, len(data), args.batch_size):\n data_batch = data[i:i + args.batch_size]\n target_batch = target[i:i + args.batch_size]\n output = model(data_batch)\n train_accuracy.update(accuracy(output, target_batch))\n loss = F.cross_entropy(output, target_batch)\n train_loss.update(loss.item())\n # Average gradients among sub-batches\n loss.div_(math.ceil(float(len(data)) / args.batch_size))\n loss.backward()\n # Gradient is applied across all ranks\n optimizer.step()\n t.set_postfix({'loss': train_loss.avg.item(),\n 'accuracy': 100. 
* train_accuracy.avg.item()})\n t.update(1)\n\n if log_writer:\n log_writer.add_scalar('train/loss', train_loss.avg, epoch)\n log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)\n\n\ndef validate(epoch):\n model.eval()\n val_loss = Metric('val_loss')\n val_accuracy = Metric('val_accuracy')\n\n with tqdm(total=len(val_loader),\n desc='Validate Epoch #{}'.format(epoch + 1),\n disable=not verbose) as t:\n with torch.no_grad():\n for data, target in val_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n\n val_loss.update(F.cross_entropy(output, target))\n val_accuracy.update(accuracy(output, target))\n t.set_postfix({'loss': val_loss.avg.item(),\n 'accuracy': 100. * val_accuracy.avg.item()})\n t.update(1)\n\n if log_writer:\n log_writer.add_scalar('val/loss', val_loss.avg, epoch)\n log_writer.add_scalar('val/accuracy', val_accuracy.avg, epoch)\n\n\n# Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final\n# accuracy. Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during\n# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.\n# After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.\ndef adjust_learning_rate(epoch, batch_idx):\n if epoch < args.warmup_epochs:\n epoch += float(batch_idx + 1) / len(train_loader)\n lr_adj = 1. / hvd.size() * (epoch * (hvd.size() - 1) / args.warmup_epochs + 1)\n elif epoch < 30:\n lr_adj = 1.\n elif epoch < 60:\n lr_adj = 1e-1\n elif epoch < 80:\n lr_adj = 1e-2\n else:\n lr_adj = 1e-3\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.base_lr * hvd.size() * args.batches_per_allreduce * lr_adj\n\n\ndef accuracy(output, target):\n # get the index of the max log-probability\n pred = output.max(1, keepdim=True)[1]\n return pred.eq(target.view_as(pred)).cpu().float().mean()\n\n\ndef save_checkpoint(epoch):\n if hvd.rank() == 0:\n filepath = args.checkpoint_format.format(epoch=epoch + 1)\n state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, filepath)\n\n\n# Horovod: average metrics from distributed training.\nclass Metric(object):\n def __init__(self, name):\n self.name = name\n self.sum = torch.tensor(0.)\n self.n = torch.tensor(0.)\n\n def update(self, val):\n self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)\n self.n += 1\n\n @property\n def avg(self):\n return self.sum / self.n\n\n\nfor epoch in range(resume_from_epoch, args.epochs):\n train(epoch)\n validate(epoch)\n save_checkpoint(epoch)\n", "path": "examples/pytorch_imagenet_resnet50.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport torch\nimport argparse\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data.distributed\nfrom torchvision import datasets, transforms, models\nimport horovod.torch as hvd\nimport tensorboardX\nimport os\nimport math\nfrom tqdm import tqdm\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Example',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--train-dir', default=os.path.expanduser('~/imagenet/train'),\n help='path to training data')\nparser.add_argument('--val-dir', default=os.path.expanduser('~/imagenet/validation'),\n help='path to validation data')\nparser.add_argument('--log-dir', default='./logs',\n help='tensorboard log 
directory')\nparser.add_argument('--checkpoint-format', default='./checkpoint-{epoch}.pth.tar',\n help='checkpoint file format')\nparser.add_argument('--fp16-allreduce', action='store_true', default=False,\n help='use fp16 compression during allreduce')\nparser.add_argument('--batches-per-allreduce', type=int, default=1,\n help='number of batches processed locally before '\n 'executing allreduce across workers; it multiplies '\n 'total batch size.')\n\n# Default settings from https://arxiv.org/abs/1706.02677.\nparser.add_argument('--batch-size', type=int, default=32,\n help='input batch size for training')\nparser.add_argument('--val-batch-size', type=int, default=32,\n help='input batch size for validation')\nparser.add_argument('--epochs', type=int, default=90,\n help='number of epochs to train')\nparser.add_argument('--base-lr', type=float, default=0.0125,\n help='learning rate for a single GPU')\nparser.add_argument('--warmup-epochs', type=float, default=5,\n help='number of warmup epochs')\nparser.add_argument('--momentum', type=float, default=0.9,\n help='SGD momentum')\nparser.add_argument('--wd', type=float, default=0.00005,\n help='weight decay')\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=42,\n help='random seed')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nallreduce_batch_size = args.batch_size * args.batches_per_allreduce\n\nhvd.init()\ntorch.manual_seed(args.seed)\n\nif args.cuda:\n # Horovod: pin GPU to local rank.\n torch.cuda.set_device(hvd.local_rank())\n torch.cuda.manual_seed(args.seed)\n\ncudnn.benchmark = True\n\n# If set > 0, will resume training from a given checkpoint.\nresume_from_epoch = 0\nfor try_epoch in range(args.epochs, 0, -1):\n if os.path.exists(args.checkpoint_format.format(epoch=try_epoch)):\n resume_from_epoch = try_epoch\n break\n\n# Horovod: broadcast resume_from_epoch from rank 0 (which will have\n# checkpoints) to other ranks.\nresume_from_epoch = hvd.broadcast(torch.tensor(resume_from_epoch), root_rank=0,\n name='resume_from_epoch').item()\n\n# Horovod: print logs on the first worker.\nverbose = 1 if hvd.rank() == 0 else 0\n\n# Horovod: write TensorBoard logs on first worker.\nlog_writer = tensorboardX.SummaryWriter(args.log_dir) if hvd.rank() == 0 else None\n\n\nkwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}\ntrain_dataset = \\\n datasets.ImageFolder(args.train_dir,\n transform=transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\n# Horovod: use DistributedSampler to partition data among workers. 
Manually specify\n# `num_replicas=hvd.size()` and `rank=hvd.rank()`.\ntrain_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset, num_replicas=hvd.size(), rank=hvd.rank())\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=allreduce_batch_size,\n sampler=train_sampler, **kwargs)\n\nval_dataset = \\\n datasets.ImageFolder(args.val_dir,\n transform=transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ]))\nval_sampler = torch.utils.data.distributed.DistributedSampler(\n val_dataset, num_replicas=hvd.size(), rank=hvd.rank())\nval_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size,\n sampler=val_sampler, **kwargs)\n\n\n# Set up standard ResNet-50 model.\nmodel = models.resnet50()\n\nif args.cuda:\n # Move model to GPU.\n model.cuda()\n\n# Horovod: scale learning rate by the number of GPUs.\n# Gradient Accumulation: scale learning rate by batches_per_allreduce\noptimizer = optim.SGD(model.parameters(),\n lr=(args.base_lr *\n args.batches_per_allreduce * hvd.size()),\n momentum=args.momentum, weight_decay=args.wd)\n\n# Horovod: (optional) compression algorithm.\ncompression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none\n\n# Horovod: wrap optimizer with DistributedOptimizer.\noptimizer = hvd.DistributedOptimizer(\n optimizer, named_parameters=model.named_parameters(),\n compression=compression,\n backward_passes_per_step=args.batches_per_allreduce)\n\n# Restore from a previous checkpoint, if initial_epoch is specified.\n# Horovod: restore on the first worker which will broadcast weights to other workers.\nif resume_from_epoch > 0 and hvd.rank() == 0:\n filepath = args.checkpoint_format.format(epoch=resume_from_epoch)\n checkpoint = torch.load(filepath)\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n# Horovod: broadcast parameters & optimizer state.\nhvd.broadcast_parameters(model.state_dict(), root_rank=0)\nhvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\ndef train(epoch):\n model.train()\n train_sampler.set_epoch(epoch)\n train_loss = Metric('train_loss')\n train_accuracy = Metric('train_accuracy')\n\n with tqdm(total=len(train_loader),\n desc='Train Epoch #{}'.format(epoch + 1),\n disable=not verbose) as t:\n for batch_idx, (data, target) in enumerate(train_loader):\n adjust_learning_rate(epoch, batch_idx)\n\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n # Split data into sub-batches of size batch_size\n for i in range(0, len(data), args.batch_size):\n data_batch = data[i:i + args.batch_size]\n target_batch = target[i:i + args.batch_size]\n output = model(data_batch)\n train_accuracy.update(accuracy(output, target_batch))\n loss = F.cross_entropy(output, target_batch)\n train_loss.update(loss)\n # Average gradients among sub-batches\n loss.div_(math.ceil(float(len(data)) / args.batch_size))\n loss.backward()\n # Gradient is applied across all ranks\n optimizer.step()\n t.set_postfix({'loss': train_loss.avg.item(),\n 'accuracy': 100. 
* train_accuracy.avg.item()})\n t.update(1)\n\n if log_writer:\n log_writer.add_scalar('train/loss', train_loss.avg, epoch)\n log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)\n\n\ndef validate(epoch):\n model.eval()\n val_loss = Metric('val_loss')\n val_accuracy = Metric('val_accuracy')\n\n with tqdm(total=len(val_loader),\n desc='Validate Epoch #{}'.format(epoch + 1),\n disable=not verbose) as t:\n with torch.no_grad():\n for data, target in val_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n output = model(data)\n\n val_loss.update(F.cross_entropy(output, target))\n val_accuracy.update(accuracy(output, target))\n t.set_postfix({'loss': val_loss.avg.item(),\n 'accuracy': 100. * val_accuracy.avg.item()})\n t.update(1)\n\n if log_writer:\n log_writer.add_scalar('val/loss', val_loss.avg, epoch)\n log_writer.add_scalar('val/accuracy', val_accuracy.avg, epoch)\n\n\n# Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final\n# accuracy. Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during\n# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.\n# After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.\ndef adjust_learning_rate(epoch, batch_idx):\n if epoch < args.warmup_epochs:\n epoch += float(batch_idx + 1) / len(train_loader)\n lr_adj = 1. / hvd.size() * (epoch * (hvd.size() - 1) / args.warmup_epochs + 1)\n elif epoch < 30:\n lr_adj = 1.\n elif epoch < 60:\n lr_adj = 1e-1\n elif epoch < 80:\n lr_adj = 1e-2\n else:\n lr_adj = 1e-3\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.base_lr * hvd.size() * args.batches_per_allreduce * lr_adj\n\n\ndef accuracy(output, target):\n # get the index of the max log-probability\n pred = output.max(1, keepdim=True)[1]\n return pred.eq(target.view_as(pred)).cpu().float().mean()\n\n\ndef save_checkpoint(epoch):\n if hvd.rank() == 0:\n filepath = args.checkpoint_format.format(epoch=epoch + 1)\n state = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, filepath)\n\n\n# Horovod: average metrics from distributed training.\nclass Metric(object):\n def __init__(self, name):\n self.name = name\n self.sum = torch.tensor(0.)\n self.n = torch.tensor(0.)\n\n def update(self, val):\n self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)\n self.n += 1\n\n @property\n def avg(self):\n return self.sum / self.n\n\n\nfor epoch in range(resume_from_epoch, args.epochs):\n train(epoch)\n validate(epoch)\n save_checkpoint(epoch)\n", "path": "examples/pytorch_imagenet_resnet50.py"}]} | 3,843 | 142 |
gh_patches_debug_30024 | rasdani/github-patches | git_diff | vispy__vispy-2144 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ability to pass "fpos" as a parameter to the ColorFilter
Hi all,
I am currently trying to use the ```ColorFilter``` (https://github.com/vispy/vispy/blob/main/vispy/visuals/filters/color.py) in a project along with several other filters, which I need placed in a specific order. However, right now, ```fpos``` cannot be passed as a parameter to ```ColorFilter```, which always uses 8:
```
def __init__(self, filter=(1., 1., 1., 1.)):
super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)
self.filter = filter
```
Is it possible to change this so the user can specify any position for this filter?
Thanks so much,
Clare
--- END ISSUE ---
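For context, a hedged sketch of the usage being requested. The `fpos` keyword on `ColorFilter` is an assumption and only exists once a change like the patch below lands; `Alpha` is included purely to illustrate ordering against another filter.
```python
from vispy.visuals.filters.color import Alpha, ColorFilter

# Assumed post-patch constructor: fpos chooses where this hook runs in the
# fragment shader chain, so the filter can be ordered relative to others.
tint = ColorFilter(filter=(1., 0.5, 0.5, 1.), fpos=5)
fade = Alpha(alpha=0.7)

# Attach in the desired order on some visual instance:
# visual.attach(tint)
# visual.attach(fade)
```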
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/visuals/filters/color.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 from .base_filter import Filter
6 from ..shaders import Function, Varying
7 from ...color import colormap, Color
8
9
10 class IsolineFilter(Filter):
11 FRAG_SHADER = """
12 void isoline() {
13 if ($isolevel <= 0. || $isowidth <= 0.) {
14 return;
15 }
16
17 // function taken from glumpy/examples/isocurves.py
18 // and extended to have level, width, color and antialiasing
19 // as parameters
20
21 // Extract data value
22 // this accounts for perception,
23 // have to decide, which one to use or make this a uniform
24 const vec3 w = vec3(0.299, 0.587, 0.114);
25 //const vec3 w = vec3(0.2126, 0.7152, 0.0722);
26 float value = dot(gl_FragColor.rgb, w);
27
28 // setup lw, aa
29 float linewidth = $isowidth + $antialias;
30
31 // "middle" contour(s) dividing upper and lower half
32 // but only if isolevel is even
33 if( mod($isolevel,2.0) == 0.0 ) {
34 if( length(value - 0.5) < 0.5 / $isolevel)
35 linewidth = linewidth * 2;
36 }
37
38 // Trace contour isoline
39 float v = $isolevel * value - 0.5;
40 float dv = linewidth/2.0 * fwidth(v);
41 float f = abs(fract(v) - 0.5);
42 float d = smoothstep(-dv, +dv, f);
43 float t = linewidth/2.0 - $antialias;
44 d = abs(d)*linewidth/2.0 - t;
45
46 if( d < - linewidth ) {
47 d = 1.0;
48 } else {
49 d /= $antialias;
50 }
51
52 // setup foreground
53 vec4 fc = $isocolor;
54
55 // mix with background
56 if (d < 1.) {
57 gl_FragColor = mix(gl_FragColor, fc, 1-d);
58 }
59
60 }
61 """
62
63 def __init__(self, level=2., width=2.0, antialias=1.0, color='black'):
64 super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER)
65
66 self.level = level
67 self.width = width
68 self.color = color
69 self.antialias = antialias
70
71 @property
72 def level(self):
73 return self._level
74
75 @level.setter
76 def level(self, lev):
77 if lev <= 0:
78 lev = 0
79 self._level = lev
80 self.fshader['isolevel'] = float(lev)
81
82 @property
83 def width(self):
84 return self._width
85
86 @width.setter
87 def width(self, w):
88 self._width = w
89 self.fshader['isowidth'] = float(w)
90
91 @property
92 def color(self):
93 return self._color
94
95 @color.setter
96 def color(self, c):
97 self._color = c
98 self.fshader['isocolor'] = Color(c).rgba
99
100 @property
101 def antialias(self):
102 return self._antialias
103
104 @antialias.setter
105 def antialias(self, a):
106 self._antialias = a
107 self.fshader['antialias'] = float(a)
108
109
110 class Alpha(Filter):
111 FRAG_SHADER = """
112 void apply_alpha() {
113 gl_FragColor.a = gl_FragColor.a * $alpha;
114 }
115 """
116
117 def __init__(self, alpha=1.0):
118 super(Alpha, self).__init__(fcode=self.FRAG_SHADER)
119
120 self.alpha = alpha
121
122 @property
123 def alpha(self):
124 return self._alpha
125
126 @alpha.setter
127 def alpha(self, a):
128 self._alpha = a
129 self.fshader['alpha'] = float(a)
130
131
132 class ColorFilter(Filter):
133 FRAG_SHADER = """
134 void apply_color_filter() {
135 gl_FragColor = gl_FragColor * $filter;
136 }
137 """
138
139 def __init__(self, filter=(1., 1., 1., 1.)):
140 super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)
141
142 self.filter = filter
143
144 @property
145 def filter(self):
146 return self._filter
147
148 @filter.setter
149 def filter(self, f):
150 self._filter = tuple(f)
151 self.fshader['filter'] = self._filter
152
153
154 class ZColormapFilter(Filter):
155 FRAG_SHADER = """
156 void z_colormap_support() {
157 $zval = $position.z;
158 }
159 """
160 VERT_SHADER = """
161 void apply_z_colormap() {
162 gl_FragColor = $cmap(($zval - $zrange.x) /
163 ($zrange.y - $zrange.x));
164 }
165 """
166
167 def __init__(self, cmap, zrange=(0., 1.)):
168 super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=3,
169 vcode=self.VERT_SHADER, vpos=9)
170
171 if isinstance(cmap, str):
172 cmap = colormap.get_colormap(cmap)
173 self.cmap = Function(cmap.glsl_map)
174 self.fshader['cmap'] = self.cmap
175 self.fshader['zrange'] = zrange
176 self.vshader['zval'] = Varying('v_zval', dtype='float')
177 self.fshader['zval'] = self.vshader['zval']
178
179 def _attach(self, visual):
180 super(ZColormapFilter, self)._attach(visual)
181 self.vshader['position'] = visual.shared_program.vert['position']
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/visuals/filters/color.py b/vispy/visuals/filters/color.py
--- a/vispy/visuals/filters/color.py
+++ b/vispy/visuals/filters/color.py
@@ -60,8 +60,8 @@
}
"""
- def __init__(self, level=2., width=2.0, antialias=1.0, color='black'):
- super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER)
+ def __init__(self, level=2., width=2.0, antialias=1.0, color='black', **kwargs):
+ super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER, **kwargs)
self.level = level
self.width = width
@@ -114,8 +114,8 @@
}
"""
- def __init__(self, alpha=1.0):
- super(Alpha, self).__init__(fcode=self.FRAG_SHADER)
+ def __init__(self, alpha=1.0, **kwargs):
+ super(Alpha, self).__init__(fcode=self.FRAG_SHADER, **kwargs)
self.alpha = alpha
@@ -136,8 +136,8 @@
}
"""
- def __init__(self, filter=(1., 1., 1., 1.)):
- super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)
+ def __init__(self, filter=(1., 1., 1., 1.), fpos=8, **kwargs):
+ super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos, **kwargs)
self.filter = filter
@@ -164,9 +164,9 @@
}
"""
- def __init__(self, cmap, zrange=(0., 1.)):
- super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=3,
- vcode=self.VERT_SHADER, vpos=9)
+ def __init__(self, cmap, zrange=(0., 1.), fpos=3, vpos=9, **kwargs):
+ super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos,
+ vcode=self.VERT_SHADER, vpos=vpos, **kwargs)
if isinstance(cmap, str):
cmap = colormap.get_colormap(cmap)
| {"golden_diff": "diff --git a/vispy/visuals/filters/color.py b/vispy/visuals/filters/color.py\n--- a/vispy/visuals/filters/color.py\n+++ b/vispy/visuals/filters/color.py\n@@ -60,8 +60,8 @@\n }\n \"\"\"\n \n- def __init__(self, level=2., width=2.0, antialias=1.0, color='black'):\n- super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER)\n+ def __init__(self, level=2., width=2.0, antialias=1.0, color='black', **kwargs):\n+ super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER, **kwargs)\n \n self.level = level\n self.width = width\n@@ -114,8 +114,8 @@\n }\n \"\"\"\n \n- def __init__(self, alpha=1.0):\n- super(Alpha, self).__init__(fcode=self.FRAG_SHADER)\n+ def __init__(self, alpha=1.0, **kwargs):\n+ super(Alpha, self).__init__(fcode=self.FRAG_SHADER, **kwargs)\n \n self.alpha = alpha\n \n@@ -136,8 +136,8 @@\n }\n \"\"\"\n \n- def __init__(self, filter=(1., 1., 1., 1.)):\n- super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)\n+ def __init__(self, filter=(1., 1., 1., 1.), fpos=8, **kwargs):\n+ super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos, **kwargs)\n \n self.filter = filter\n \n@@ -164,9 +164,9 @@\n }\n \"\"\"\n \n- def __init__(self, cmap, zrange=(0., 1.)):\n- super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=3,\n- vcode=self.VERT_SHADER, vpos=9)\n+ def __init__(self, cmap, zrange=(0., 1.), fpos=3, vpos=9, **kwargs):\n+ super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos,\n+ vcode=self.VERT_SHADER, vpos=vpos, **kwargs)\n \n if isinstance(cmap, str):\n cmap = colormap.get_colormap(cmap)\n", "issue": "Add ability to pass \"fpos\" as a parameter to the ColorFilter\nHi all,\r\nI am currently trying to use the ```ColorFilter``` (https://github.com/vispy/vispy/blob/main/vispy/visuals/filters/color.py) in a project along with several other filters, which I need to be placed in a specific order. However, right now, ```fpos``` cannot be passed as a parameter to ```ColorFilter```, which is always using 8:\r\n```\r\n def __init__(self, filter=(1., 1., 1., 1.)):\r\n super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)\r\n\r\n self.filter = filter\r\n```\r\n\r\nIs it possible to change this so the user can specify any position for this filter?\r\n\r\nThanks so much,\r\nClare\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom .base_filter import Filter\nfrom ..shaders import Function, Varying\nfrom ...color import colormap, Color\n\n\nclass IsolineFilter(Filter):\n FRAG_SHADER = \"\"\"\n void isoline() {\n if ($isolevel <= 0. || $isowidth <= 0.) 
{\n return;\n }\n\n // function taken from glumpy/examples/isocurves.py\n // and extended to have level, width, color and antialiasing\n // as parameters\n\n // Extract data value\n // this accounts for perception,\n // have to decide, which one to use or make this a uniform\n const vec3 w = vec3(0.299, 0.587, 0.114);\n //const vec3 w = vec3(0.2126, 0.7152, 0.0722);\n float value = dot(gl_FragColor.rgb, w);\n\n // setup lw, aa\n float linewidth = $isowidth + $antialias;\n\n // \"middle\" contour(s) dividing upper and lower half\n // but only if isolevel is even\n if( mod($isolevel,2.0) == 0.0 ) {\n if( length(value - 0.5) < 0.5 / $isolevel)\n linewidth = linewidth * 2;\n }\n\n // Trace contour isoline\n float v = $isolevel * value - 0.5;\n float dv = linewidth/2.0 * fwidth(v);\n float f = abs(fract(v) - 0.5);\n float d = smoothstep(-dv, +dv, f);\n float t = linewidth/2.0 - $antialias;\n d = abs(d)*linewidth/2.0 - t;\n\n if( d < - linewidth ) {\n d = 1.0;\n } else {\n d /= $antialias;\n }\n\n // setup foreground\n vec4 fc = $isocolor;\n\n // mix with background\n if (d < 1.) {\n gl_FragColor = mix(gl_FragColor, fc, 1-d);\n }\n\n }\n \"\"\"\n\n def __init__(self, level=2., width=2.0, antialias=1.0, color='black'):\n super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER)\n\n self.level = level\n self.width = width\n self.color = color\n self.antialias = antialias\n\n @property\n def level(self):\n return self._level\n\n @level.setter\n def level(self, lev):\n if lev <= 0:\n lev = 0\n self._level = lev\n self.fshader['isolevel'] = float(lev)\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, w):\n self._width = w\n self.fshader['isowidth'] = float(w)\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, c):\n self._color = c\n self.fshader['isocolor'] = Color(c).rgba\n\n @property\n def antialias(self):\n return self._antialias\n\n @antialias.setter\n def antialias(self, a):\n self._antialias = a\n self.fshader['antialias'] = float(a)\n\n\nclass Alpha(Filter):\n FRAG_SHADER = \"\"\"\n void apply_alpha() {\n gl_FragColor.a = gl_FragColor.a * $alpha;\n }\n \"\"\"\n\n def __init__(self, alpha=1.0):\n super(Alpha, self).__init__(fcode=self.FRAG_SHADER)\n\n self.alpha = alpha\n\n @property\n def alpha(self):\n return self._alpha\n\n @alpha.setter\n def alpha(self, a):\n self._alpha = a\n self.fshader['alpha'] = float(a)\n\n\nclass ColorFilter(Filter):\n FRAG_SHADER = \"\"\"\n void apply_color_filter() {\n gl_FragColor = gl_FragColor * $filter;\n }\n \"\"\"\n\n def __init__(self, filter=(1., 1., 1., 1.)):\n super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=8)\n\n self.filter = filter\n\n @property\n def filter(self):\n return self._filter\n\n @filter.setter\n def filter(self, f):\n self._filter = tuple(f)\n self.fshader['filter'] = self._filter\n\n\nclass ZColormapFilter(Filter):\n FRAG_SHADER = \"\"\"\n void z_colormap_support() {\n $zval = $position.z;\n }\n \"\"\"\n VERT_SHADER = \"\"\"\n void apply_z_colormap() {\n gl_FragColor = $cmap(($zval - $zrange.x) /\n ($zrange.y - $zrange.x));\n }\n \"\"\"\n\n def __init__(self, cmap, zrange=(0., 1.)):\n super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=3,\n vcode=self.VERT_SHADER, vpos=9)\n\n if isinstance(cmap, str):\n cmap = colormap.get_colormap(cmap)\n self.cmap = Function(cmap.glsl_map)\n self.fshader['cmap'] = self.cmap\n self.fshader['zrange'] = zrange\n self.vshader['zval'] = Varying('v_zval', dtype='float')\n self.fshader['zval'] = 
self.vshader['zval']\n\n def _attach(self, visual):\n super(ZColormapFilter, self)._attach(visual)\n self.vshader['position'] = visual.shared_program.vert['position']\n", "path": "vispy/visuals/filters/color.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\nfrom .base_filter import Filter\nfrom ..shaders import Function, Varying\nfrom ...color import colormap, Color\n\n\nclass IsolineFilter(Filter):\n FRAG_SHADER = \"\"\"\n void isoline() {\n if ($isolevel <= 0. || $isowidth <= 0.) {\n return;\n }\n\n // function taken from glumpy/examples/isocurves.py\n // and extended to have level, width, color and antialiasing\n // as parameters\n\n // Extract data value\n // this accounts for perception,\n // have to decide, which one to use or make this a uniform\n const vec3 w = vec3(0.299, 0.587, 0.114);\n //const vec3 w = vec3(0.2126, 0.7152, 0.0722);\n float value = dot(gl_FragColor.rgb, w);\n\n // setup lw, aa\n float linewidth = $isowidth + $antialias;\n\n // \"middle\" contour(s) dividing upper and lower half\n // but only if isolevel is even\n if( mod($isolevel,2.0) == 0.0 ) {\n if( length(value - 0.5) < 0.5 / $isolevel)\n linewidth = linewidth * 2;\n }\n\n // Trace contour isoline\n float v = $isolevel * value - 0.5;\n float dv = linewidth/2.0 * fwidth(v);\n float f = abs(fract(v) - 0.5);\n float d = smoothstep(-dv, +dv, f);\n float t = linewidth/2.0 - $antialias;\n d = abs(d)*linewidth/2.0 - t;\n\n if( d < - linewidth ) {\n d = 1.0;\n } else {\n d /= $antialias;\n }\n\n // setup foreground\n vec4 fc = $isocolor;\n\n // mix with background\n if (d < 1.) {\n gl_FragColor = mix(gl_FragColor, fc, 1-d);\n }\n\n }\n \"\"\"\n\n def __init__(self, level=2., width=2.0, antialias=1.0, color='black', **kwargs):\n super(IsolineFilter, self).__init__(fcode=self.FRAG_SHADER, **kwargs)\n\n self.level = level\n self.width = width\n self.color = color\n self.antialias = antialias\n\n @property\n def level(self):\n return self._level\n\n @level.setter\n def level(self, lev):\n if lev <= 0:\n lev = 0\n self._level = lev\n self.fshader['isolevel'] = float(lev)\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, w):\n self._width = w\n self.fshader['isowidth'] = float(w)\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, c):\n self._color = c\n self.fshader['isocolor'] = Color(c).rgba\n\n @property\n def antialias(self):\n return self._antialias\n\n @antialias.setter\n def antialias(self, a):\n self._antialias = a\n self.fshader['antialias'] = float(a)\n\n\nclass Alpha(Filter):\n FRAG_SHADER = \"\"\"\n void apply_alpha() {\n gl_FragColor.a = gl_FragColor.a * $alpha;\n }\n \"\"\"\n\n def __init__(self, alpha=1.0, **kwargs):\n super(Alpha, self).__init__(fcode=self.FRAG_SHADER, **kwargs)\n\n self.alpha = alpha\n\n @property\n def alpha(self):\n return self._alpha\n\n @alpha.setter\n def alpha(self, a):\n self._alpha = a\n self.fshader['alpha'] = float(a)\n\n\nclass ColorFilter(Filter):\n FRAG_SHADER = \"\"\"\n void apply_color_filter() {\n gl_FragColor = gl_FragColor * $filter;\n }\n \"\"\"\n\n def __init__(self, filter=(1., 1., 1., 1.), fpos=8, **kwargs):\n super(ColorFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos, **kwargs)\n\n self.filter = filter\n\n @property\n def filter(self):\n return self._filter\n\n @filter.setter\n def filter(self, f):\n self._filter = tuple(f)\n 
self.fshader['filter'] = self._filter\n\n\nclass ZColormapFilter(Filter):\n FRAG_SHADER = \"\"\"\n void z_colormap_support() {\n $zval = $position.z;\n }\n \"\"\"\n VERT_SHADER = \"\"\"\n void apply_z_colormap() {\n gl_FragColor = $cmap(($zval - $zrange.x) /\n ($zrange.y - $zrange.x));\n }\n \"\"\"\n\n def __init__(self, cmap, zrange=(0., 1.), fpos=3, vpos=9, **kwargs):\n super(ZColormapFilter, self).__init__(fcode=self.FRAG_SHADER, fpos=fpos,\n vcode=self.VERT_SHADER, vpos=vpos, **kwargs)\n\n if isinstance(cmap, str):\n cmap = colormap.get_colormap(cmap)\n self.cmap = Function(cmap.glsl_map)\n self.fshader['cmap'] = self.cmap\n self.fshader['zrange'] = zrange\n self.vshader['zval'] = Varying('v_zval', dtype='float')\n self.fshader['zval'] = self.vshader['zval']\n\n def _attach(self, visual):\n super(ZColormapFilter, self)._attach(visual)\n self.vshader['position'] = visual.shared_program.vert['position']\n", "path": "vispy/visuals/filters/color.py"}]} | 2,233 | 567 |
gh_patches_debug_40239 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
securedrop-admin (setup) fails in Tails 4.0-rc1
## Description
Running `./securedrop-admin setup` in tails 4.0-rc1 (upgraded from 3.16) returns the following error:
```
INFO: Virtualenv already exists, not creating
INFO: Checking Python dependencies for securedrop-admin
ERROR: Failed to install pip dependencies. Check network connection and try again.
```
This was done in VMs, will try to reproduce on hardware.
## Steps to Reproduce
1. Upgrade Tails device from 3.16 to 4.0 (Boot into 4.0-rc1 drive and clone to upgrade 3.16 drive)
2. Boot into newly-upgraded tails drive
3. Verify that the internet is working (tor is bootstrapped, you can reach an external website over tor)
4. check out `1.1.0~rc2` tag
5. Run `./securedrop-admin setup` in ~/Persistent/securedrop
6. Observe error
## Expected Behavior
Securedrop-admin should run and the dependencies should be installed.
## Actual Behavior
Securedrop-admin fails and returns an error, the dependencies are not installed
## Comments
Per https://github.com/freedomofpress/securedrop/pull/4852/files#diff-b5e536cc161fcc0d62e661b4d6eae381R70-R73
When running the commands locally, I get
* `lsb_release --id --short` returns `Debian`
* `uname -a` returns `Linux amnesia 5.3.0-trunk-amd64 #1 SMF Debian 5.3.2-1~exp1 (2019-10-02) x86_64 GNU/Linux`
When I run ./securedrop-admin with no parameters, I get:
```
amnesia@amnesia:~/Persistent/securedrop$ ./securedrop-admin help
Could not find platform independent libraries <prefix>
Could not find platform dependent libraries <exec_prefix>
Consider setting $PYTHONHOME to <prefix>[:<exec_prefix>]
Fatal Python error: Py_Initialize: Unable to get the locale encoding
ImportError: No module named 'encodings'
Current thread 0x00007cf687450740 (most recent call first):
Aborted
```
--- END ISSUE ---
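One detail worth keeping in mind while reading the code below: under Python 3, `subprocess.check_output` returns `bytes` rather than `str`, so its stripped output never compares equal to a plain string literal. A minimal standalone sketch (not taken from the repository; the printed value depends on the host distribution):

```python
import subprocess

# check_output returns bytes under Python 3 unless text=True/universal_newlines is requested
out = subprocess.check_output('lsb_release --id --short', shell=True).strip()
print(repr(out))         # e.g. b'Debian'
print(out == 'Debian')   # False: bytes never compare equal to str
print(out == b'Debian')  # True when the distribution reports Debian
```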
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `admin/bootstrap.py`
Content:
```
1 # -*- mode: python; coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2018 Freedom of the Press Foundation & al
4 # Copyright (C) 2018 Loic Dachary <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19
20 import argparse
21 import logging
22 import os
23 import subprocess
24 import sys
25
26 sdlog = logging.getLogger(__name__)
27
28 DIR = os.path.dirname(os.path.realpath(__file__))
29 VENV_DIR = os.path.join(DIR, ".venv3")
30
31
32 def setup_logger(verbose=False):
33 """ Configure logging handler """
34 # Set default level on parent
35 sdlog.setLevel(logging.DEBUG)
36 level = logging.DEBUG if verbose else logging.INFO
37
38 stdout = logging.StreamHandler(sys.stdout)
39 stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
40 stdout.setLevel(level)
41 sdlog.addHandler(stdout)
42
43
44 def run_command(command):
45 """
46 Wrapper function to display stdout for running command,
47 similar to how shelling out in a Bash script displays rolling output.
48
49 Yields a list of the stdout from the `command`, and raises a
50 CalledProcessError if `command` returns non-zero.
51 """
52 popen = subprocess.Popen(command,
53 stdout=subprocess.PIPE,
54 stderr=subprocess.STDOUT)
55 for stdout_line in iter(popen.stdout.readline, b""):
56 yield stdout_line
57 popen.stdout.close()
58 return_code = popen.wait()
59 if return_code:
60 raise subprocess.CalledProcessError(return_code, command)
61
62
63 def is_tails():
64 try:
65 id = subprocess.check_output('lsb_release --id --short',
66 shell=True).strip()
67 except subprocess.CalledProcessError:
68 id = None
69
70 # dirty hack to unreliably detect Tails 4.0~beta2
71 if id == 'Debian':
72 if os.uname()[1] == 'amnesia':
73 id = 'Tails'
74
75 return id == 'Tails'
76
77
78 def maybe_torify():
79 if is_tails():
80 return ['torify']
81 else:
82 return []
83
84
85 def install_apt_dependencies(args):
86 """
87 Install apt dependencies in Tails. In order to install Ansible in
88 a virtualenv, first there are a number of Python prerequisites.
89 """
90 sdlog.info("Installing SecureDrop Admin dependencies")
91 sdlog.info(("You'll be prompted for the temporary Tails admin password,"
92 " which was set on Tails login screen"))
93
94 apt_command = ['sudo', 'su', '-c',
95 "apt-get update && \
96 apt-get -q -o=Dpkg::Use-Pty=0 install -y \
97 python3-virtualenv \
98 python3-yaml \
99 python3-pip \
100 ccontrol \
101 virtualenv \
102 libffi-dev \
103 libssl-dev \
104 libpython3-dev",
105 ]
106
107 try:
108 # Print command results in real-time, to keep Admin apprised
109 # of progress during long-running command.
110 for output_line in run_command(apt_command):
111 print(output_line.decode('utf-8').rstrip())
112 except subprocess.CalledProcessError:
113 # Tails supports apt persistence, which was used by SecureDrop
114 # under Tails 2.x. If updates are being applied, don't try to pile
115 # on with more apt requests.
116 sdlog.error(("Failed to install apt dependencies. Check network"
117 " connection and try again."))
118 raise
119
120
121 def envsetup(args):
122 """Installs Admin tooling required for managing SecureDrop. Specifically:
123
124 * updates apt-cache
125 * installs apt packages for Python virtualenv
126 * creates virtualenv
127 * installs pip packages inside virtualenv
128
129 The virtualenv is created within the Persistence volume in Tails, so that
130 Ansible is available to the Admin on subsequent boots without requiring
131 installation of packages again.
132 """
133 # virtualenv doesnt exist? Install dependencies and create
134 if not os.path.exists(VENV_DIR):
135
136 install_apt_dependencies(args)
137
138 # Technically you can create a virtualenv from within python
139 # but pip can only be run over tor on tails, and debugging that
140 # along with instaling a third-party dependency is not worth
141 # the effort here.
142 sdlog.info("Setting up virtualenv")
143 try:
144 sdlog.debug(subprocess.check_output(
145 maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],
146 stderr=subprocess.STDOUT))
147 except subprocess.CalledProcessError as e:
148 sdlog.debug(e.output)
149 sdlog.error(("Unable to create virtualenv. Check network settings"
150 " and try again."))
151 raise
152 else:
153 sdlog.info("Virtualenv already exists, not creating")
154
155 install_pip_dependencies(args)
156 if os.path.exists(os.path.join(DIR, 'setup.py')):
157 install_pip_self(args)
158
159 sdlog.info("Finished installing SecureDrop dependencies")
160
161
162 def install_pip_self(args):
163 pip_install_cmd = [
164 os.path.join(VENV_DIR, 'bin', 'pip3'),
165 'install', '-e', DIR
166 ]
167 try:
168 subprocess.check_output(maybe_torify() + pip_install_cmd,
169 stderr=subprocess.STDOUT)
170 except subprocess.CalledProcessError as e:
171 sdlog.debug(e.output)
172 sdlog.error("Unable to install self, run with -v for more information")
173 raise
174
175
176 def install_pip_dependencies(args, pip_install_cmd=[
177 os.path.join(VENV_DIR, 'bin', 'pip3'),
178 'install',
179 # Specify requirements file.
180 '-r', os.path.join(DIR, 'requirements.txt'),
181 '--require-hashes',
182 # Make sure to upgrade packages only if necessary.
183 '-U', '--upgrade-strategy', 'only-if-needed',
184 ]):
185 """
186 Install Python dependencies via pip into virtualenv.
187 """
188
189 sdlog.info("Checking Python dependencies for securedrop-admin")
190 try:
191 pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,
192 stderr=subprocess.STDOUT)
193 except subprocess.CalledProcessError as e:
194 sdlog.debug(e.output)
195 sdlog.error(("Failed to install pip dependencies. Check network"
196 " connection and try again."))
197 raise
198
199 sdlog.debug(pip_output)
200 if "Successfully installed" in str(pip_output):
201 sdlog.info("Python dependencies for securedrop-admin upgraded")
202 else:
203 sdlog.info("Python dependencies for securedrop-admin are up-to-date")
204
205
206 def parse_argv(argv):
207 parser = argparse.ArgumentParser()
208 parser.add_argument('-v', action='store_true', default=False,
209 help="Increase verbosity on output")
210 parser.set_defaults(func=envsetup)
211
212 return parser.parse_args(argv)
213
214
215 if __name__ == "__main__":
216 args = parse_argv(sys.argv[1:])
217 setup_logger(args.v)
218 if args.v:
219 args.func(args)
220 else:
221 try:
222 args.func(args)
223 except Exception:
224 sys.exit(1)
225 else:
226 sys.exit(0)
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/admin/bootstrap.py b/admin/bootstrap.py
--- a/admin/bootstrap.py
+++ b/admin/bootstrap.py
@@ -20,6 +20,7 @@
import argparse
import logging
import os
+import shutil
import subprocess
import sys
@@ -68,13 +69,48 @@
id = None
# dirty hack to unreliably detect Tails 4.0~beta2
- if id == 'Debian':
+ if id == b'Debian':
if os.uname()[1] == 'amnesia':
id = 'Tails'
return id == 'Tails'
+def clean_up_tails3_venv(virtualenv_dir=VENV_DIR):
+ """
+ Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is
+ based on Debian Buster and uses libpython3.7. This means that the Tails 3.x
+ virtualenv will not work under Tails 4.x, and will need to be destroyed and
+ rebuilt. We can detect if the version of libpython is 3.5 in the
+ admin/.venv3/ folder, and delete it if that's the case. This will ensure a
+ smooth upgrade from Tails 3.x to Tails 4.x.
+ """
+ if is_tails():
+ try:
+ dist = subprocess.check_output('lsb_release --codename --short',
+ shell=True).strip()
+ except subprocess.CalledProcessError:
+ dist = None
+
+ # tails4 is based on buster
+ if dist == b'buster':
+ python_lib_path = os.path.join(virtualenv_dir, "lib/python3.5")
+ if os.path.exists(os.path.join(python_lib_path)):
+ sdlog.info(
+ "Tails 3 Python 3 virtualenv detected. "
+ "Removing it."
+ )
+ shutil.rmtree(virtualenv_dir)
+ sdlog.info("Tails 3 Python 3 virtualenv deleted.")
+
+
+def checkenv(args):
+ clean_up_tails3_venv(VENV_DIR)
+ if not os.path.exists(os.path.join(VENV_DIR, "bin/activate")):
+ sdlog.error('Please run "securedrop-admin setup".')
+ sys.exit(1)
+
+
def maybe_torify():
if is_tails():
return ['torify']
@@ -130,6 +166,9 @@
Ansible is available to the Admin on subsequent boots without requiring
installation of packages again.
"""
+ # clean up tails 3.x venv when migrating to tails 4.x
+ clean_up_tails3_venv(VENV_DIR)
+
# virtualenv doesnt exist? Install dependencies and create
if not os.path.exists(VENV_DIR):
@@ -209,18 +248,30 @@
help="Increase verbosity on output")
parser.set_defaults(func=envsetup)
+ subparsers = parser.add_subparsers()
+
+ envsetup_parser = subparsers.add_parser(
+ 'envsetup',
+ help='Set up the admin virtualenv.'
+ )
+ envsetup_parser.set_defaults(func=envsetup)
+
+ checkenv_parser = subparsers.add_parser(
+ 'checkenv',
+ help='Check that the admin virtualenv is properly set up.'
+ )
+ checkenv_parser.set_defaults(func=checkenv)
+
return parser.parse_args(argv)
if __name__ == "__main__":
args = parse_argv(sys.argv[1:])
setup_logger(args.v)
- if args.v:
+
+ try:
args.func(args)
+ except Exception:
+ sys.exit(1)
else:
- try:
- args.func(args)
- except Exception:
- sys.exit(1)
- else:
- sys.exit(0)
+ sys.exit(0)
| {"golden_diff": "diff --git a/admin/bootstrap.py b/admin/bootstrap.py\n--- a/admin/bootstrap.py\n+++ b/admin/bootstrap.py\n@@ -20,6 +20,7 @@\n import argparse\n import logging\n import os\n+import shutil\n import subprocess\n import sys\n \n@@ -68,13 +69,48 @@\n id = None\n \n # dirty hack to unreliably detect Tails 4.0~beta2\n- if id == 'Debian':\n+ if id == b'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n \n return id == 'Tails'\n \n \n+def clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n+ \"\"\"\n+ Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n+ based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n+ virtualenv will not work under Tails 4.x, and will need to be destroyed and\n+ rebuilt. We can detect if the version of libpython is 3.5 in the\n+ admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n+ smooth upgrade from Tails 3.x to Tails 4.x.\n+ \"\"\"\n+ if is_tails():\n+ try:\n+ dist = subprocess.check_output('lsb_release --codename --short',\n+ shell=True).strip()\n+ except subprocess.CalledProcessError:\n+ dist = None\n+\n+ # tails4 is based on buster\n+ if dist == b'buster':\n+ python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n+ if os.path.exists(os.path.join(python_lib_path)):\n+ sdlog.info(\n+ \"Tails 3 Python 3 virtualenv detected. \"\n+ \"Removing it.\"\n+ )\n+ shutil.rmtree(virtualenv_dir)\n+ sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n+\n+\n+def checkenv(args):\n+ clean_up_tails3_venv(VENV_DIR)\n+ if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n+ sdlog.error('Please run \"securedrop-admin setup\".')\n+ sys.exit(1)\n+\n+\n def maybe_torify():\n if is_tails():\n return ['torify']\n@@ -130,6 +166,9 @@\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n+ # clean up tails 3.x venv when migrating to tails 4.x\n+ clean_up_tails3_venv(VENV_DIR)\n+\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n \n@@ -209,18 +248,30 @@\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n \n+ subparsers = parser.add_subparsers()\n+\n+ envsetup_parser = subparsers.add_parser(\n+ 'envsetup',\n+ help='Set up the admin virtualenv.'\n+ )\n+ envsetup_parser.set_defaults(func=envsetup)\n+\n+ checkenv_parser = subparsers.add_parser(\n+ 'checkenv',\n+ help='Check that the admin virtualenv is properly set up.'\n+ )\n+ checkenv_parser.set_defaults(func=checkenv)\n+\n return parser.parse_args(argv)\n \n \n if __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n- if args.v:\n+\n+ try:\n args.func(args)\n+ except Exception:\n+ sys.exit(1)\n else:\n- try:\n- args.func(args)\n- except Exception:\n- sys.exit(1)\n- else:\n- sys.exit(0)\n+ sys.exit(0)\n", "issue": "securedrop-admin (setup) fails in Tails 4.0-rc1\n## Description\r\n\r\nRunning `./securedrop-admin setup` in tails 4.0-rc1 (upgraded from 3.16) returns the following error:\r\n\r\n```\r\nINFO: Virtualenv already exists, not creating\r\nINFO: Checking Python dependencies for securedrop-admin\r\nERROR: Failed to install pip dependencies. Check network connection and try again.\r\n```\r\nThis was done in VMs, will try to reproduce on hardware.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Upgrade Tails device from 3.16 to 4.0 (Boot into 4.0-rc1 drive and clone to upgrade 3.16 drive)\r\n2. Boot into newly-upgraded tails drive\r\n3. 
Verify that the internet is working (tor is bootstrapped, you can reach an external website over tor)\r\n4. check out `1.1.0~rc2` tag\r\n5. Run `./securedrop-admin setup` in ~/Persistent/securedrop\r\n6. Observe error\r\n\r\n## Expected Behavior\r\n\r\nSecuredrop-admin should run and the dependencies should be installed.\r\n\r\n## Actual Behavior\r\n\r\nSecuredrop-admin fails and returns an error, the dependencies are not installed\r\n\r\n## Comments\r\n\r\nPer https://github.com/freedomofpress/securedrop/pull/4852/files#diff-b5e536cc161fcc0d62e661b4d6eae381R70-R73\r\n\r\nWhen running the commands locally, I get\r\n* `lsb_release --id --short` returns `Debian`\r\n* `uname -a` returns `Linux amnesia 5.3.0-trunk-amd64 #1 SMF Debian 5.3.2-1~exp1 (2019-10-02) x86_64 GNU/Linux`\r\n\r\nWhen i run ./securedrop-admin with no parameter, I get:\r\n```\r\namnesia@amnesia:~/Persistent/securedrop$ ./securedrop-admin help\r\nCould not find platform independent libraries <prefix>\r\nCould not find platform dependent libraries <exec_prefix>\r\nConsider setting $PYTHONHOME to <prefix>[:<exec_prefix>]\r\nFatal Python error: Py_Initialize: Unable to get the locale encoding\r\nImportError: No module named 'encodings'\r\n\r\nCurrent thread 0x00007cf687450740 (most recent call first):\r\nAborted\r\n```\nsecuredrop-admin (setup) fails in Tails 4.0-rc1\n## Description\r\n\r\nRunning `./securedrop-admin setup` in tails 4.0-rc1 (upgraded from 3.16) returns the following error:\r\n\r\n```\r\nINFO: Virtualenv already exists, not creating\r\nINFO: Checking Python dependencies for securedrop-admin\r\nERROR: Failed to install pip dependencies. Check network connection and try again.\r\n```\r\nThis was done in VMs, will try to reproduce on hardware.\r\n\r\n## Steps to Reproduce\r\n\r\n1. Upgrade Tails device from 3.16 to 4.0 (Boot into 4.0-rc1 drive and clone to upgrade 3.16 drive)\r\n2. Boot into newly-upgraded tails drive\r\n3. Verify that the internet is working (tor is bootstrapped, you can reach an external website over tor)\r\n4. check out `1.1.0~rc2` tag\r\n5. Run `./securedrop-admin setup` in ~/Persistent/securedrop\r\n6. 
Observe error\r\n\r\n## Expected Behavior\r\n\r\nSecuredrop-admin should run and the dependencies should be installed.\r\n\r\n## Actual Behavior\r\n\r\nSecuredrop-admin fails and returns an error, the dependencies are not installed\r\n\r\n## Comments\r\n\r\nPer https://github.com/freedomofpress/securedrop/pull/4852/files#diff-b5e536cc161fcc0d62e661b4d6eae381R70-R73\r\n\r\nWhen running the commands locally, I get\r\n* `lsb_release --id --short` returns `Debian`\r\n* `uname -a` returns `Linux amnesia 5.3.0-trunk-amd64 #1 SMF Debian 5.3.2-1~exp1 (2019-10-02) x86_64 GNU/Linux`\r\n\r\nWhen i run ./securedrop-admin with no parameter, I get:\r\n```\r\namnesia@amnesia:~/Persistent/securedrop$ ./securedrop-admin help\r\nCould not find platform independent libraries <prefix>\r\nCould not find platform dependent libraries <exec_prefix>\r\nConsider setting $PYTHONHOME to <prefix>[:<exec_prefix>]\r\nFatal Python error: Py_Initialize: Unable to get the locale encoding\r\nImportError: No module named 'encodings'\r\n\r\nCurrent thread 0x00007cf687450740 (most recent call first):\r\nAborted\r\n```\n", "before_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # virtualenv doesnt exist? 
Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n if args.v:\n args.func(args)\n else:\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}], "after_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == b'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(VENV_DIR)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}]} | 3,572 | 881 |